From 1b2547843817b4b7adbeb87ea9b070d9cac35c90 Mon Sep 17 00:00:00 2001
From: Peng Tao
Date: Mon, 17 Jun 2013 17:33:12 +0800
Subject: [PATCH] LU-1346 libcfs: replace cfs_ memory wrappers

Replace the memory-related wrappers with the equivalent kernel APIs.

Affected primitives:
CFS_PAGE_SIZE, CFS_PAGE_SHIFT, cfs_num_physpages, cfs_copy_from_user,
cfs_copy_to_user, cfs_page_address, cfs_kmap/cfs_kunmap, cfs_get_page,
cfs_page_count, cfs_page_index, cfs_page_pin, cfs_page_unpin,
cfs_memory_pressure_get/set/clr, CFS_NUM_CACHEPAGES, CFS_ALLOC_XXX flags,
cfs_alloc/free, cfs_alloc/free_large, cfs_alloc/free_page, CFS_DECL_MMSPACE,
CFS_MMSPACE_OPEN, CFS_MMSPACE_CLOSE, CFS_SLAB_XXX flags, cfs_shrinker_t,
cfs_set/remove_shrinker, CFS_DEFAULT_SEEKS, cfs_mem_cache_t,
cfs_mem_cache_alloc/free/create/destroy, cfs_mem_is_in_cache

Manual changes:
1. cfs_alloc_flags_to_gfp() is removed
2. remove kmalloc/kfree etc. from linux-mem.c and linux-mem.h
3. remove page_address/kmap/kunmap etc. from linux-mem.h
4. remove page_cache_get/page_cache_release from echo_internal.h; they are
   already defined in user-mem.h
5. change the kmem_cache_create/destroy prototypes to the kernel's and
   modify all callers to match
6. define _SPL_KMEM_H and related macros to avoid using SPL's sys/kmem.h,
   which redefines the slab allocator
7. change kmem_virt() to the kernel-provided is_vmalloc_addr(), so that no
   SPL sys/kmem.h functions are used
8. clean up include files a little bit in osd-zfs
9. various coding style cleanups

NUMA allocators (cfs_cpt_xxx) are not changed in this patch.

gnilnd is not converted, as requested by James Simmons.

Signed-off-by: Liu Xuezhao
Signed-off-by: Peng Tao
Change-Id: Iadfbb0d5a0e31c78dd6c811e5ffdb468fa7e6f44
Reviewed-on: http://review.whamcloud.com/2831
Tested-by: Hudson
Tested-by: Maloo
Reviewed-by: Oleg Drokin
---
 contrib/scripts/libcfs_cleanup.sed          | 183 ++++++++++----
 libcfs/include/libcfs/darwin/darwin-mem.h   | 122 +++++++++-----
 libcfs/include/libcfs/libcfs.h              |  33 ----
 libcfs/include/libcfs/libcfs_crypto.h       |   2 +-
 libcfs/include/libcfs/libcfs_prim.h         |  28 +---
 libcfs/include/libcfs/libcfs_private.h      |  44 ++---
 libcfs/include/libcfs/libcfs_string.h       |   2 +-
 libcfs/include/libcfs/linux/kp30.h          |   2 +-
 libcfs/include/libcfs/linux/linux-mem.h     | 118 +++----------
 libcfs/include/libcfs/posix/libcfs.h        |  12 +-
 libcfs/include/libcfs/user-mem.h            | 135 +++++++++------
 libcfs/include/libcfs/winnt/portals_utils.h |  16 +-
 libcfs/include/libcfs/winnt/winnt-mem.h     | 117 ++++++++-----
 libcfs/include/libcfs/winnt/winnt-prim.h    |  14 +-
 libcfs/include/libcfs/winnt/winnt-tcpip.h   |   4 +-
 libcfs/libcfs/darwin/darwin-mem.c           | 235 +++++++++++++-------------
 libcfs/libcfs/darwin/darwin-tcpip.c         |   8 +-
 libcfs/libcfs/darwin/darwin-tracefile.c     |   2 +-
 libcfs/libcfs/debug.c                       |   2 +-
 libcfs/libcfs/heap.c                        |   2 +-
 libcfs/libcfs/kernel_user_comm.c            |   4 +-
 libcfs/libcfs/libcfs_string.c               |   2 +-
 libcfs/libcfs/linux/linux-crypto.c          |  14 +-
 libcfs/libcfs/linux/linux-curproc.c         |  10 +-
 libcfs/libcfs/linux/linux-mem.c             | 153 +----------------
 libcfs/libcfs/linux/linux-tcpip.c           |  12 +-
 libcfs/libcfs/linux/linux-tracefile.c       |   2 +-
 libcfs/libcfs/lwt.c                         |  54 +++---
 libcfs/libcfs/module.c                      | 168 +++++++++----------
 libcfs/libcfs/posix/posix-debug.c           |  14 +-
 libcfs/libcfs/tracefile.c                   | 118 ++++++-------
 libcfs/libcfs/tracefile.h                   |  10 +-
 libcfs/libcfs/user-crypto.c                 |  14 +-
 libcfs/libcfs/user-mem.c                    |  36 ++--
 libcfs/libcfs/winnt/winnt-curproc.c         |  45 ++---
 libcfs/libcfs/winnt/winnt-fs.c              |  36 ++--
 libcfs/libcfs/winnt/winnt-mem.c             | 250 +++++++++++++---------
libcfs/libcfs/winnt/winnt-module.c | 6 +- libcfs/libcfs/winnt/winnt-prim.c | 56 +++---- libcfs/libcfs/winnt/winnt-proc.c | 202 +++++++++++----------- libcfs/libcfs/winnt/winnt-tcpip.c | 50 +++--- libcfs/libcfs/winnt/winnt-tracefile.c | 14 +- libcfs/libcfs/winnt/winnt-usr.c | 2 +- libcfs/libcfs/winnt/winnt-utils.c | 8 +- lnet/include/lnet/darwin/lib-types.h | 2 +- lnet/include/lnet/types.h | 20 +-- lnet/klnds/o2iblnd/o2iblnd.c | 4 +- lnet/klnds/ptllnd/ptllnd.c | 4 +- lnet/klnds/ptllnd/ptllnd.h | 2 +- lnet/klnds/ptllnd/ptllnd_rx_buf.c | 4 +- lnet/klnds/socklnd/socklnd_lib-darwin.c | 16 +- lnet/klnds/socklnd/socklnd_lib-linux.c | 3 +- lnet/lnet/api-ni.c | 6 +- lnet/lnet/lib-md.c | 2 +- lnet/lnet/lib-move.c | 164 +++++++++--------- lnet/lnet/router.c | 10 +- lnet/lnet/router_proc.c | 8 +- lnet/selftest/brw_test.c | 32 ++-- lnet/selftest/conctl.c | 195 ++++++++++------------ lnet/selftest/conrpc.c | 68 ++++---- lnet/selftest/console.c | 44 ++--- lnet/selftest/framework.c | 8 +- lnet/selftest/rpc.c | 22 +-- lnet/selftest/selftest.h | 8 +- lnet/utils/lst.c | 2 +- lustre/fld/fld_cache.c | 2 +- lustre/include/cl_object.h | 22 +-- lustre/include/lclient.h | 10 +- lustre/include/liblustre.h | 4 +- lustre/include/lu_object.h | 4 +- lustre/include/lustre/lustre_idl.h | 6 +- lustre/include/lustre_capa.h | 2 +- lustre/include/lustre_debug.h | 4 +- lustre/include/lustre_disk.h | 6 +- lustre/include/lustre_idmap.h | 2 +- lustre/include/lustre_lib.h | 12 +- lustre/include/lustre_net.h | 22 +-- lustre/include/obd.h | 64 +++---- lustre/include/obd_class.h | 4 +- lustre/include/obd_support.h | 55 +++--- lustre/lclient/lcommon_cl.c | 40 ++--- lustre/ldlm/ldlm_extent.c | 6 +- lustre/ldlm/ldlm_internal.h | 2 +- lustre/ldlm/ldlm_lib.c | 28 ++-- lustre/ldlm/ldlm_lock.c | 11 +- lustre/ldlm/ldlm_lockd.c | 71 ++++---- lustre/ldlm/ldlm_pool.c | 48 +++--- lustre/ldlm/ldlm_request.c | 2 +- lustre/ldlm/ldlm_resource.c | 6 +- lustre/lfsck/lfsck_namespace.c | 2 +- lustre/liblustre/dir.c | 6 +- lustre/liblustre/llite_cl.c | 13 +- lustre/liblustre/llite_lib.h | 4 +- lustre/liblustre/super.c | 4 +- lustre/liblustre/tests/sanity.c | 24 +-- lustre/llite/dir.c | 70 ++++---- lustre/llite/file.c | 6 +- lustre/llite/llite_internal.h | 18 +- lustre/llite/llite_lib.c | 22 +-- lustre/llite/llite_mmap.c | 7 +- lustre/llite/lloop.c | 18 +- lustre/llite/lproc_llite.c | 78 ++++----- lustre/llite/remote_perm.c | 8 +- lustre/llite/rw.c | 25 +-- lustre/llite/rw26.c | 23 +-- lustre/llite/super25.c | 114 ++++++------- lustre/llite/vvp_dev.c | 34 ++-- lustre/llite/vvp_internal.h | 6 +- lustre/llite/vvp_io.c | 21 +-- lustre/llite/vvp_page.c | 132 +++++++-------- lustre/lmv/lmv_obd.c | 20 +-- lustre/lmv/lproc_lmv.c | 2 +- lustre/lod/lod_dev.c | 4 +- lustre/lod/lod_object.c | 2 +- lustre/lov/lov_cl_internal.h | 26 +-- lustre/lov/lov_dev.c | 24 +-- lustre/lov/lov_ea.c | 2 +- lustre/lov/lov_internal.h | 2 +- lustre/lov/lov_lock.c | 6 +- lustre/lov/lov_obd.c | 31 ++-- lustre/lov/lov_object.c | 6 +- lustre/lov/lov_pack.c | 6 +- lustre/lov/lov_page.c | 8 +- lustre/lov/lovsub_dev.c | 2 +- lustre/lov/lovsub_lock.c | 2 +- lustre/lov/lovsub_object.c | 2 +- lustre/lov/lovsub_page.c | 2 +- lustre/lvfs/fsfilt_ext3.c | 40 ++--- lustre/mdc/mdc_request.c | 28 ++-- lustre/mdd/mdd_device.c | 2 +- lustre/mdd/mdd_dir.c | 2 +- lustre/mdd/mdd_lproc.c | 44 ++--- lustre/mdd/mdd_object.c | 12 +- lustre/mdt/mdt_handler.c | 23 +-- lustre/mdt/mdt_lproc.c | 18 +- lustre/mgc/mgc_request.c | 70 ++++---- lustre/mgs/mgs_handler.c | 6 +- lustre/mgs/mgs_nids.c | 54 +++--- 
lustre/obdclass/capa.c | 42 ++--- lustre/obdclass/cl_lock.c | 4 +- lustre/obdclass/cl_object.c | 4 +- lustre/obdclass/cl_page.c | 18 +- lustre/obdclass/class_obd.c | 16 +- lustre/obdclass/debug.c | 2 +- lustre/obdclass/dt_object.c | 4 +- lustre/obdclass/genops.c | 92 +++++----- lustre/obdclass/linkea.c | 2 +- lustre/obdclass/linux/linux-module.c | 14 +- lustre/obdclass/linux/linux-obdo.c | 4 +- lustre/obdclass/linux/linux-sysctl.c | 86 +++++----- lustre/obdclass/lprocfs_status.c | 24 +-- lustre/obdclass/lu_object.c | 26 ++- lustre/obdclass/lu_ref.c | 6 +- lustre/obdecho/echo.c | 123 +++++++------- lustre/obdecho/echo_client.c | 104 ++++++------ lustre/obdecho/echo_internal.h | 7 - lustre/ofd/ofd_dev.c | 4 +- lustre/ofd/ofd_fmd.c | 12 +- lustre/ofd/ofd_internal.h | 4 +- lustre/ofd/ofd_obd.c | 6 +- lustre/osc/lproc_osc.c | 46 ++--- lustre/osc/osc_cache.c | 51 +++--- lustre/osc/osc_cl_internal.h | 16 +- lustre/osc/osc_dev.c | 38 ++--- lustre/osc/osc_internal.h | 2 +- lustre/osc/osc_io.c | 4 +- lustre/osc/osc_lock.c | 20 +-- lustre/osc/osc_object.c | 2 +- lustre/osc/osc_page.c | 10 +- lustre/osc/osc_request.c | 94 +++++------ lustre/osd-ldiskfs/osd_handler.c | 8 +- lustre/osd-ldiskfs/osd_internal.h | 4 +- lustre/osd-ldiskfs/osd_io.c | 20 +-- lustre/osd-ldiskfs/osd_lproc.c | 2 +- lustre/osd-ldiskfs/osd_quota_fmt.c | 4 +- lustre/osd-zfs/osd_handler.c | 2 +- lustre/osd-zfs/osd_internal.h | 9 + lustre/osd-zfs/osd_io.c | 20 +-- lustre/osd-zfs/osd_lproc.c | 1 - lustre/osd-zfs/osd_object.c | 4 +- lustre/osd-zfs/osd_quota.c | 1 - lustre/osd-zfs/udmu.h | 1 - lustre/osp/osp_dev.c | 4 +- lustre/osp/osp_internal.h | 2 +- lustre/osp/osp_md_object.c | 2 +- lustre/ost/ost_handler.c | 6 +- lustre/ost/ost_internal.h | 2 +- lustre/ptlrpc/client.c | 22 +-- lustre/ptlrpc/events.c | 2 +- lustre/ptlrpc/gss/gss_cli_upcall.c | 80 ++++----- lustre/ptlrpc/gss/gss_krb5_mech.c | 4 +- lustre/ptlrpc/gss/gss_pipefs.c | 4 +- lustre/ptlrpc/import.c | 2 +- lustre/ptlrpc/lproc_ptlrpc.c | 13 +- lustre/ptlrpc/nrs.c | 2 +- lustre/ptlrpc/nrs_crr.c | 6 +- lustre/ptlrpc/nrs_orr.c | 20 +-- lustre/ptlrpc/pers.c | 4 +- lustre/ptlrpc/ptlrpc_internal.h | 2 +- lustre/ptlrpc/recover.c | 2 +- lustre/ptlrpc/sec_bulk.c | 90 +++++----- lustre/ptlrpc/sec_plain.c | 26 +-- lustre/quota/lquota_entry.c | 2 +- lustre/quota/lquota_internal.h | 2 +- lustre/quota/lquota_lib.c | 2 +- lustre/quota/qsd_internal.h | 2 +- lustre/quota/qsd_lib.c | 2 +- lustre/quota/qsd_reint.c | 16 +- lustre/quota/qsd_request.c | 4 +- lustre/quota/qsd_writeback.c | 4 +- lustre/tests/checkfiemap.c | 1 + lustre/utils/lustre_cfg.c | 16 +- 212 files changed, 2794 insertions(+), 3014 deletions(-) diff --git a/contrib/scripts/libcfs_cleanup.sed b/contrib/scripts/libcfs_cleanup.sed index 61a8253..7aea56f 100644 --- a/contrib/scripts/libcfs_cleanup.sed +++ b/contrib/scripts/libcfs_cleanup.sed @@ -256,91 +256,100 @@ s/\bCFS_DTTOIF\b/DTTOIF/g ################################################################################ # memory operations - -#s/\bcfs_page_t\b/struct page/g -#s/\bCFS_PAGE_SIZE\b/PAGE_CACHE_SIZE/g -#/#[ \t]*define[ \t]*\bPAGE_CACHE_SIZE\b[ \t]*\bPAGE_CACHE_SIZE\b/d -#s/\bCFS_PAGE_SHIFT\b/PAGE_CACHE_SHIFT/g -#/#[ \t]*define[ \t]*\bPAGE_CACHE_SHIFT\b[ \t]*\bPAGE_CACHE_SHIFT\b/d -#s/\bCFS_PAGE_MASK\b/PAGE_CACHE_MASK/g -#/#[ \t]*define[ \t]*\bPAGE_CACHE_MASK\b[ \t]*\bPAGE_CACHE_MASK\b/d -#s/\bcfs_num_physpages\b/num_physpages/g -#/#[ \t]*define[ \t]*\bnum_physpages\b[ \t]*\bnum_physpages\b/d -#s/\bcfs_copy_from_user\b/copy_from_user/g -#/#[ \t]*define[ 
\t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)/d -#s/\bcfs_copy_to_user\b/copy_to_user/g -#/#[ \t]*define[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)/d -#s/\bcfs_page_address\b/page_address/g -#/#[ \t]*define[ \t]*\bpage_address\b *( *\w* *)[ \t]*\bpage_address\b *( *\w* *)/d -#s/\bcfs_kmap\b/kmap/g -#/#[ \t]*define[ \t]*\bkmap\b *( *\w* *)[ \t]*\bkmap\b *( *\w* *)/d -#s/\bcfs_kunmap\b/kunmap/g -#/#[ \t]*define[ \t]*\bkunmap\b *( *\w* *)[ \t]*\bkunmap\b *( *\w* *)/d -#s/\bcfs_get_page\b/get_page/g -#/#[ \t]*define[ \t]*\bget_page\b *( *\w* *)[ \t]*\bget_page\b *( *\w* *)/d -#s/\bcfs_page_count\b/page_count/g -#/#[ \t]*define[ \t]*\bpage_count\b *( *\w* *)[ \t]*\bpage_count\b *( *\w* *)/d -#s/\bcfs_page_index\b/page_index/g -#/#[ \t]*define[ \t]*\bpage_index\b *( *\w* *)[ \t]*\bpage_index\b *( *\w* *)/d -#s/\bcfs_page_pin\b/page_cache_get/g -#/#[ \t]*define[ \t]*\bpage_cache_get\b *( *\w* *)[ \t]*\bpage_cache_get\b *( *\w* *)/d -#s/\bcfs_page_unpin\b/page_cache_release/g -#/#[ \t]*define[ \t]*\bpage_cache_release\b *( *\w* *)[ \t]*\bpage_cache_release\b *( *\w* *)/d -#s/\bcfs_memory_pressure_get\b/memory_pressure_get/g -#s/\bcfs_memory_pressure_set\b/memory_pressure_set/g -#s/\bcfs_memory_pressure_clr\b/memory_pressure_clr/g -#s/\bCFS_NUM_CACHEPAGES\b/NUM_CACHEPAGES/g -# memory allocator -#s/\bCFS_ALLOC_ATOMIC\b/GFP_ATOMIC/g -#/#[ \t]*define[ \t]*\bGFP_ATOMIC\b[ \t]*\bGFP_ATOMIC\b/d -#s/\bCFS_ALLOC_WAIT\b/__GFP_WAIT/g -#/#[ \t]*define[ \t]*\b__GFP_WAIT\b[ \t]*\b__GFP_WAIT\b/d -#s/\bCFS_ALLOC_ZERO\b/__GFP_ZERO/g -#/#[ \t]*define[ \t]*\b__GFP_ZERO\b[ \t]*\b__GFP_ZERO\b/d -#s/\bCFS_ALLOC_FS\b/__GFP_FS/g -#/#[ \t]*define[ \t]*\b__GFP_FS\b[ \t]*\b__GFP_FS\b/d -#s/\bCFS_ALLOC_IO\b/__GFP_IO/g -#/#[ \t]*define[ \t]*\b__GFP_IO\b[ \t]*\b__GFP_IO\b/d -#s/\bCFS_ALLOC_NOWARN\b/__GFP_NOWARN/g -#/#[ \t]*define[ \t]*\b__GFP_NOWARN\b[ \t]*\b__GFP_NOWARN\b/d -#s/\bCFS_ALLOC_STD\b/GFP_IOFS/g -#/#[ \t]*define[ \t]*\bGFP_IOFS\b[ \t]*\bGFP_IOFS\b/d -#s/\bCFS_ALLOC_USER\b/GFP_KERNEL/g -#/#[ \t]*define[ \t]*\bGFP_KERNEL\b[ \t]*\bGFP_KERNEL\b/d -#s/\bCFS_ALLOC_HIGHMEM\b/__GFP_HIGHMEM/g -#/#[ \t]*define[ \t]*\b__GFP_HIGHMEM\b[ \t]*\b__GFP_HIGHMEM\b/d -#s/\bCFS_ALLOC_HIGHUSER\b/GFP_HIGHUSER/g -#/#[ \t]*define[ \t]*\bGFP_HIGHUSER\b[ \t]*\bGFP_HIGHUSER\b/d -#s/\bCFS_ALLOC_ATOMIC_TRY\b/ALLOC_ATOMIC_TRY/g -#s/\bcfs_alloc\b/kmalloc/g -#/#[ \t]*define[ \t]*\bkmalloc\b *( *\w* *, *\w* *)[ \t]*\bkmalloc\b *( *\w* *, *\w* *)/d -#s/\bcfs_free\b/kfree/g -#/#[ \t]*define[ \t]*\bkfree\b *( *\w* *)[ \t]*\bkfree\b *( *\w* *)/d -#s/\bcfs_alloc_large\b/vmalloc/g -#/#[ \t]*define[ \t]*\bvmalloc\b *( *\w* *)[ \t]*\bvmalloc\b *( *\w* *)/d -#s/\bcfs_free_large\b/vfree/g -#/#[ \t]*define[ \t]*\bvfree\b *( *\w* *)[ \t]*\bvfree\b *( *\w* *)/d -#s/\bcfs_alloc_page\b/alloc_page/g -#/#[ \t]*define[ \t]*\balloc_page\b *( *\w* *)[ \t]*\balloc_page\b *( *\w* *)/d -#s/\bcfs_free_page\b/__free_page/g -#/#[ \t]*define[ \t]*\b__free_page\b *( *\w* *)[ \t]*\b__free_page\b *( *\w* *)/d +s/\bcfs_page_t\b/struct page/g +/typedef[ \t]*\bstruct page\b[ \t]*\bstruct page\b/d +s/\bCFS_PAGE_SIZE\b/PAGE_CACHE_SIZE/g +/#[ \t]*define[ \t]*\bPAGE_CACHE_SIZE\b[ \t]*\bPAGE_CACHE_SIZE\b/d +s/\bCFS_PAGE_SHIFT\b/PAGE_CACHE_SHIFT/g +/#[ \t]*define[ \t]*\bPAGE_CACHE_SHIFT\b[ \t]*\bPAGE_CACHE_SHIFT\b/d +s/\bcfs_num_physpages\b/num_physpages/g +/#[ \t]*define[ \t]*\bnum_physpages\b[ \t]*\bnum_physpages\b/d +s/\bcfs_copy_from_user\b/copy_from_user/g +/#[ \t]*define[ 
\t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)/d +s/\bcfs_copy_to_user\b/copy_to_user/g +/#[ \t]*define[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)/d +s/\bcfs_page_address\b/page_address/g +/#[ \t]*define[ \t]*\bpage_address\b *( *\w* *)[ \t]*\bpage_address\b *( *\w* *)/d +s/\bcfs_kmap\b/kmap/g +/#[ \t]*define[ \t]*\bkmap\b *( *\w* *)[ \t]*\bkmap\b *( *\w* *)/d +s/\bcfs_kunmap\b/kunmap/g +/#[ \t]*define[ \t]*\bkunmap\b *( *\w* *)[ \t]*\bkunmap\b *( *\w* *)/d +s/\bcfs_get_page\b/get_page/g +/#[ \t]*define[ \t]*\bget_page\b *( *\w* *)[ \t]*\bget_page\b *( *\w* *)/d +s/\bcfs_page_count\b/page_count/g +/#[ \t]*define[ \t]*\bpage_count\b *( *\w* *)[ \t]*\bpage_count\b *( *\w* *)/d +s/\bcfs_page_index\b/page_index/g +/#[ \t]*define[ \t]*\bpage_index\b *( *\w* *)[ \t]*\bpage_index\b *( *\w* *)/d +s/\bcfs_page_pin\b/page_cache_get/g +/#[ \t]*define[ \t]*\bpage_cache_get\b *( *\w* *)[ \t]*\bpage_cache_get\b *( *\w* *)/d +s/\bcfs_page_unpin\b/page_cache_release/g +/#[ \t]*define[ \t]*\bpage_cache_release\b *( *\w* *)[ \t]*\bpage_cache_release\b *( *\w* *)/d +s/\bcfs_memory_pressure_get\b/memory_pressure_get/g +s/\bcfs_memory_pressure_set\b/memory_pressure_set/g +s/\bcfs_memory_pressure_clr\b/memory_pressure_clr/g +s/\bCFS_NUM_CACHEPAGES\b/NUM_CACHEPAGES/g + # memory allocator +s/\bCFS_ALLOC_ATOMIC\b/GFP_ATOMIC/g +/#[ \t]*define[ \t]*\bGFP_ATOMIC\b[ \t]*\bGFP_ATOMIC\b/d +s/\bCFS_ALLOC_WAIT\b/__GFP_WAIT/g +/#[ \t]*define[ \t]*\b__GFP_WAIT\b[ \t]*\b__GFP_WAIT\b/d +s/\bCFS_ALLOC_ZERO\b/__GFP_ZERO/g +/#[ \t]*define[ \t]*\b__GFP_ZERO\b[ \t]*\b__GFP_ZERO\b/d +s/\bCFS_ALLOC_FS\b/__GFP_FS/g +/#[ \t]*define[ \t]*\b__GFP_FS\b[ \t]*\b__GFP_FS\b/d +s/\bCFS_ALLOC_IO\b/__GFP_IO/g +/#[ \t]*define[ \t]*\b__GFP_IO\b[ \t]*\b__GFP_IO\b/d +s/\bCFS_ALLOC_NOWARN\b/__GFP_NOWARN/g +/#[ \t]*define[ \t]*\b__GFP_NOWARN\b[ \t]*\b__GFP_NOWARN\b/d +s/\bCFS_ALLOC_STD\b/GFP_IOFS/g +/#[ \t]*define[ \t]*\bGFP_IOFS\b[ \t]*\bGFP_IOFS\b/d +s/\bCFS_ALLOC_USER\b/GFP_USER/g +/#[ \t]*define[ \t]*\bGFP_USER\b[ \t]*\bGFP_USER\b/d +s/\bCFS_ALLOC_KERNEL\b/GFP_KERNEL/g +/#[ \t]*define[ \t]*\bGFP_KERNEL\b[ \t]*\bGFP_KERNEL\b/d +s/\bCFS_ALLOC_NOFS\b/GFP_NOFS/g +/#[ \t]*define[ \t]*\bGFP_NOFS\b[ \t]*\bGFP_NOFS\b/d +s/\bCFS_ALLOC_HIGHMEM\b/__GFP_HIGHMEM/g +/#[ \t]*define[ \t]*\b__GFP_HIGHMEM\b[ \t]*\b__GFP_HIGHMEM\b/d +s/\bCFS_ALLOC_HIGHUSER\b/GFP_HIGHUSER/g +/#[ \t]*define[ \t]*\bGFP_HIGHUSER\b[ \t]*\bGFP_HIGHUSER\b/d +s/\bCFS_ALLOC_ATOMIC_TRY\b/ALLOC_ATOMIC_TRY/g +s/\bcfs_alloc\b/kmalloc/g +/#[ \t]*define[ \t]*\bkmalloc\b *( *\w* *, *\w* *)[ \t]*\bkmalloc\b *( *\w* *, *\w* *)/d +s/\bcfs_free\b/kfree/g +/#[ \t]*define[ \t]*\bkfree\b *( *\w* *)[ \t]*\bkfree\b *( *\w* *)/d +s/\bcfs_alloc_large\b/vmalloc/g +/#[ \t]*define[ \t]*\bvmalloc\b *( *\w* *)[ \t]*\bvmalloc\b *( *\w* *)/d +s/\bcfs_free_large\b/vfree/g +/#[ \t]*define[ \t]*\bvfree\b *( *\w* *)[ \t]*\bvfree\b *( *\w* *)/d +s/\bcfs_alloc_page\b/alloc_page/g +/#[ \t]*define[ \t]*\balloc_page\b *( *\w* *)[ \t]*\balloc_page\b *( *\w* *)/d +s/\bcfs_free_page\b/__free_page/g +/#[ \t]*define[ \t]*\b__free_page\b *( *\w* *)[ \t]*\b__free_page\b *( *\w* *)/d # TODO: SLAB allocator -#s/\bCFS_DECL_MMSPACE\b/DECL_MMSPACE/g -#s/\bCFS_MMSPACE_OPEN\b/MMSPACE_OPEN/g -#s/\bCFS_MMSPACE_CLOSE\b/MMSPACE_CLOSE/g -#s/\bCFS_SLAB_HWCACHE_ALIGN\b/SLAB_HWCACHE_ALIGN/g -#/#[ \t]*define[ \t]*\bSLAB_HWCACHE_ALIGN\b[ \t]*\bSLAB_HWCACHE_ALIGN\b/d -#s/\bCFS_SLAB_KERNEL\b/SLAB_KERNEL/g -#/#[ \t]*define[ 
\t]*\bSLAB_KERNEL\b[ \t]*\bSLAB_KERNEL\b/d
-#s/\bCFS_SLAB_NOFS\b/SLAB_NOFS/g
-#/#[ \t]*define[ \t]*\bSLAB_NOFS\b[ \t]*\bSLAB_NOFS\b/d
-#s/\bcfs_shrinker\b/shrinker/g
-#/#[ \t]*define[ \t]*\bshrinker\b[ \t]*\bshrinker\b/d
-#s/\bcfs_shrinker_t\b/struct shrinkert/g
-#/typedef[ \t]*\bshrinker_t\b[ \t]*\bshrinker_t\b/d
-#s/\bcfs_set_shrinker\b/set_shrinker/g
-#/#[ \t]*define[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)/d
-#s/\bcfs_remove_shrinker\b/remove_shrinker/g
-#/#[ \t]*define[ \t]*\bremove_shrinker\b *( *\w* *)[ \t]*\bremove_shrinker\b *( *\w* *)/d
-#s/\bCFS_DEFAULT_SEEKS\b/DEFAULT_SEEKS/g
-#/#[ \t]*define[ \t]*\bDEFAULT_SEEKS\b[ \t]*\bDEFAULT_SEEKS\b/d
+s/\bCFS_DECL_MMSPACE\b/DECL_MMSPACE/g
+s/\bCFS_MMSPACE_OPEN\b/MMSPACE_OPEN/g
+s/\bCFS_MMSPACE_CLOSE\b/MMSPACE_CLOSE/g
+s/\bCFS_SLAB_HWCACHE_ALIGN\b/SLAB_HWCACHE_ALIGN/g
+/#[ \t]*define[ \t]*\bSLAB_HWCACHE_ALIGN\b[ \t]*\bSLAB_HWCACHE_ALIGN\b/d
+s/\bCFS_SLAB_KERNEL\b/SLAB_KERNEL/g
+/#[ \t]*define[ \t]*\bSLAB_KERNEL\b[ \t]*\bSLAB_KERNEL\b/d
+s/\bCFS_SLAB_NOFS\b/SLAB_NOFS/g
+/#[ \t]*define[ \t]*\bSLAB_NOFS\b[ \t]*\bSLAB_NOFS\b/d
+s/\bcfs_shrinker\b/shrinker/g
+/#[ \t]*define[ \t]*\bshrinker\b[ \t]*\bshrinker\b/d
+s/\bcfs_shrinker_t\b/shrinker_t/g
+/typedef[ \t]*\bshrinker_t\b[ \t]*\bshrinker_t\b/d
+s/\bcfs_set_shrinker\b/set_shrinker/g
+/#[ \t]*define[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)/d
+s/\bcfs_remove_shrinker\b/remove_shrinker/g
+/#[ \t]*define[ \t]*\bremove_shrinker\b *( *\w* *)[ \t]*\bremove_shrinker\b *( *\w* *)/d
+s/\bCFS_DEFAULT_SEEKS\b/DEFAULT_SEEKS/g
+/#[ \t]*define[ \t]*\bDEFAULT_SEEKS\b[ \t]*\bDEFAULT_SEEKS\b/d
+s/cfs_mem_cache_t/struct kmem_cache/g
+s/cfs_mem_cache_create/kmem_cache_create/g
+s/\w\+[ =]*cfs_mem_cache_destroy/kmem_cache_destroy/g
+s/cfs_mem_cache_destroy/kmem_cache_destroy/g
+s/cfs_mem_cache_alloc/kmem_cache_alloc/g
+s/cfs_mem_cache_free/kmem_cache_free/g
+s/cfs_mem_is_in_cache/kmem_is_in_cache/g
diff --git a/libcfs/include/libcfs/darwin/darwin-mem.h b/libcfs/include/libcfs/darwin/darwin-mem.h
index ebaf064..326268b 100644
--- a/libcfs/include/libcfs/darwin/darwin-mem.h
+++ b/libcfs/include/libcfs/darwin/darwin-mem.h
@@ -74,14 +74,14 @@
 /* Variable sized pages are not supported */
 #ifdef PAGE_SHIFT
-#define CFS_PAGE_SHIFT	PAGE_SHIFT
+#define PAGE_CACHE_SHIFT	PAGE_SHIFT
 #else
-#define CFS_PAGE_SHIFT	12
+#define PAGE_CACHE_SHIFT	12
 #endif
-#define CFS_PAGE_SIZE	(1UL << CFS_PAGE_SHIFT)
+#define PAGE_CACHE_SIZE	(1UL << PAGE_CACHE_SHIFT)

-#define CFS_PAGE_MASK	(~((__u64)CFS_PAGE_SIZE - 1))
+#define CFS_PAGE_MASK	(~((__u64)PAGE_CACHE_SIZE - 1))

 enum {
 	XNU_PAGE_RAW,
@@ -101,23 +101,24 @@ typedef __u32 page_off_t;
  * - "xll" pages (XNU_PAGE_XLL): these are used by file system to cache
  *   file data, owned by file system objects, hashed, lrued, etc.
  *
- * cfs_page_t has to cover both of them, because core Lustre code is based on
+ * struct page has to cover both of them, because core Lustre code is based on
  * the Linux assumption that page is _both_ memory buffer and file system
  * caching entity.
  *
  * To achieve this, all types of pages supported on XNU has to start from
- * common header that contains only "page type". Common cfs_page_t operations
+ * common header that contains only "page type". Common struct page operations
  * dispatch through operation vector based on page type.
 *
 */
-typedef struct xnu_page {
+struct xnu_page {
 	int type;
-} cfs_page_t;
+};
+#define page xnu_page

 struct xnu_page_ops {
-	void *(*page_map)       (cfs_page_t *);
-	void  (*page_unmap)     (cfs_page_t *);
-	void *(*page_address)   (cfs_page_t *);
+	void *(*page_map)       (struct page *);
+	void  (*page_unmap)     (struct page *);
+	void *(*page_address)   (struct page *);
 };

 void xnu_page_ops_register(int type, struct xnu_page_ops *ops);
@@ -136,44 +136,81 @@ struct xnu_raw_page {
 /*
  * Public interface to lustre
  *
- * - cfs_alloc_page(f)
- * - cfs_free_page(p)
- * - cfs_kmap(p)
- * - cfs_kunmap(p)
- * - cfs_page_address(p)
+ * - alloc_page(f)
+ * - __free_page(p)
+ * - kmap(p)
+ * - kunmap(p)
+ * - page_address(p)
  */

 /*
- * Of all functions above only cfs_kmap(), cfs_kunmap(), and
- * cfs_page_address() can be called on file system pages. The rest is for raw
+ * Of all functions above only kmap(), kunmap(), and
+ * page_address() can be called on file system pages. The rest is for raw
  * pages only.
  */

-cfs_page_t *cfs_alloc_page(u_int32_t flags);
-void cfs_free_page(cfs_page_t *page);
-void cfs_get_page(cfs_page_t *page);
-int cfs_put_page_testzero(cfs_page_t *page);
-int cfs_page_count(cfs_page_t *page);
-#define cfs_page_index(pg)	(0)
+struct page *alloc_page(u_int32_t flags);
+void __free_page(struct page *page);
+void get_page(struct page *page);
+int cfs_put_page_testzero(struct page *page);
+int page_count(struct page *page);
+#define page_index(pg)	(0)

-void *cfs_page_address(cfs_page_t *pg);
-void *cfs_kmap(cfs_page_t *pg);
-void cfs_kunmap(cfs_page_t *pg);
+void *page_address(struct page *pg);
+void *kmap(struct page *pg);
+void kunmap(struct page *pg);

 /*
  * Memory allocator
  */

-void *cfs_alloc(size_t nr_bytes, u_int32_t flags);
-void cfs_free(void *addr);
+void *kmalloc(size_t nr_bytes, u_int32_t flags);
+void kfree(void *addr);

-void *cfs_alloc_large(size_t nr_bytes);
-void cfs_free_large(void *addr);
+void *vmalloc(size_t nr_bytes);
+void vfree(void *addr);

 extern int get_preemption_level(void);

-#define CFS_ALLOC_ATOMIC_TRY					\
-	(get_preemption_level() != 0 ? CFS_ALLOC_ATOMIC : 0)
+/*
+ * Universal memory allocator API
+ */
+enum cfs_alloc_flags {
+	/* allocation is not allowed to block */
+	GFP_ATOMIC = 0x1,
+	/* allocation is allowed to block */
+	__GFP_WAIT = 0x2,
+	/* allocation should return zeroed memory */
+	__GFP_ZERO = 0x4,
+	/* allocation is allowed to call file-system code to free/clean
+	 * memory */
+	__GFP_FS = 0x8,
+	/* allocation is allowed to do io to free/clean memory */
+	__GFP_IO = 0x10,
+	/* don't report allocation failure to the console */
+	__GFP_NOWARN = 0x20,
+	/* standard allocator flag combination */
+	GFP_IOFS = __GFP_FS | __GFP_IO,
+	GFP_USER = __GFP_WAIT | __GFP_FS | __GFP_IO,
+	GFP_NOFS = __GFP_WAIT | __GFP_IO,
+	GFP_KERNEL = __GFP_WAIT | __GFP_IO | __GFP_FS,
+};
+
+/* flags for cfs_page_alloc() in addition to enum cfs_alloc_flags */
+enum cfs_alloc_page_flags {
+	/* allow to return page beyond KVM. It has to be mapped into KVM by
+	 * kmap() and unmapped with kunmap(). */
+	__GFP_HIGHMEM = 0x40,
+	GFP_HIGHUSER = __GFP_WAIT | __GFP_FS | __GFP_IO |
+		       __GFP_HIGHMEM,
+};
+
+#define ALLOC_ATOMIC_TRY \
+	(get_preemption_level() != 0 ?
GFP_ATOMIC : 0) + +#define memory_pressure_get() (0) +#define memory_pressure_set() do {} while (0) +#define memory_pressure_clr() do {} while (0) /* * Slab: @@ -207,31 +244,32 @@ typedef zone_t mem_cache_t; #define MC_NAME_MAX_LEN 64 -typedef struct cfs_mem_cache { +struct kmem_cache { int mc_size; mem_cache_t mc_cache; struct list_head mc_link; char mc_name [MC_NAME_MAX_LEN]; -} cfs_mem_cache_t; +}; #define KMEM_CACHE_MAX_COUNT 64 #define KMEM_MAX_ZONE 8192 -cfs_mem_cache_t * cfs_mem_cache_create (const char *, size_t, size_t, unsigned long); -int cfs_mem_cache_destroy ( cfs_mem_cache_t * ); -void *cfs_mem_cache_alloc ( cfs_mem_cache_t *, int); -void cfs_mem_cache_free ( cfs_mem_cache_t *, void *); +struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, + unsigned long, void *); +void kmem_cache_destroy(struct kmem_cache *); +void *kmem_cache_alloc(struct kmem_cache *, int); +void kmem_cache_free(struct kmem_cache *, void *); /* * Misc */ /* XXX Liang: num_physpages... fix me */ #define num_physpages (64 * 1024) -#define CFS_NUM_CACHEPAGES num_physpages +#define NUM_CACHEPAGES num_physpages -#define CFS_DECL_MMSPACE -#define CFS_MMSPACE_OPEN do {} while(0) -#define CFS_MMSPACE_CLOSE do {} while(0) +#define DECL_MMSPACE +#define MMSPACE_OPEN do {} while (0) +#define MMSPACE_CLOSE do {} while (0) #define copy_from_user(kaddr, uaddr, size) copyin(CAST_USER_ADDR_T(uaddr), (caddr_t)kaddr, size) #define copy_to_user(uaddr, kaddr, size) copyout((caddr_t)kaddr, CAST_USER_ADDR_T(uaddr), size) diff --git a/libcfs/include/libcfs/libcfs.h b/libcfs/include/libcfs/libcfs.h index 34c36a3..c44b8a7 100644 --- a/libcfs/include/libcfs/libcfs.h +++ b/libcfs/include/libcfs/libcfs.h @@ -185,39 +185,6 @@ struct cfs_psdev_ops { }; /* - * Universal memory allocator API - */ -enum cfs_alloc_flags { - /* allocation is not allowed to block */ - CFS_ALLOC_ATOMIC = 0x1, - /* allocation is allowed to block */ - CFS_ALLOC_WAIT = 0x2, - /* allocation should return zeroed memory */ - CFS_ALLOC_ZERO = 0x4, - /* allocation is allowed to call file-system code to free/clean - * memory */ - CFS_ALLOC_FS = 0x8, - /* allocation is allowed to do io to free/clean memory */ - CFS_ALLOC_IO = 0x10, - /* don't report allocation failure to the console */ - CFS_ALLOC_NOWARN = 0x20, - /* standard allocator flag combination */ - CFS_ALLOC_STD = CFS_ALLOC_FS | CFS_ALLOC_IO, - CFS_ALLOC_USER = CFS_ALLOC_WAIT | CFS_ALLOC_FS | CFS_ALLOC_IO, - CFS_ALLOC_NOFS = CFS_ALLOC_WAIT | CFS_ALLOC_IO, - CFS_ALLOC_KERNEL = CFS_ALLOC_WAIT | CFS_ALLOC_IO | CFS_ALLOC_FS, -}; - -/* flags for cfs_page_alloc() in addition to enum cfs_alloc_flags */ -enum cfs_alloc_page_flags { - /* allow to return page beyond KVM. It has to be mapped into KVM by - * cfs_kmap() and unmapped with cfs_kunmap(). */ - CFS_ALLOC_HIGHMEM = 0x40, - CFS_ALLOC_HIGHUSER = CFS_ALLOC_WAIT | CFS_ALLOC_FS | CFS_ALLOC_IO | - CFS_ALLOC_HIGHMEM, -}; - -/* * Drop into debugger, if possible. Implementation is provided by platform. */ diff --git a/libcfs/include/libcfs/libcfs_crypto.h b/libcfs/include/libcfs/libcfs_crypto.h index 291191a..64ca62f 100644 --- a/libcfs/include/libcfs/libcfs_crypto.h +++ b/libcfs/include/libcfs/libcfs_crypto.h @@ -157,7 +157,7 @@ struct cfs_crypto_hash_desc* * @retval 0 for success. */ int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *desc, - cfs_page_t *page, unsigned int offset, + struct page *page, unsigned int offset, unsigned int len); /** Update digest by part of data. 
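As an aside, here is a minimal sketch of what the flag conversion above means at a call site (illustrative only, not part of the patch; buf_alloc/buf_free and the two-page cutoff are invented, cf. LIBCFS_VMALLOC_SIZE later in this patch):

#include <linux/mm.h>		/* is_vmalloc_addr() */
#include <linux/slab.h>		/* kmalloc(), kfree() */
#include <linux/vmalloc.h>	/* vmalloc(), vfree() */

static void *buf_alloc(size_t len)
{
	/* was: cfs_alloc(len, CFS_ALLOC_IO | CFS_ALLOC_NOWARN) */
	if (len <= 2 * PAGE_SIZE)	/* cf. LIBCFS_VMALLOC_SIZE */
		return kmalloc(len, __GFP_IO | __GFP_NOWARN);
	/* was: cfs_alloc_large(len) */
	return vmalloc(len);
}

static void buf_free(void *buf)
{
	/* was: cfs_free_large()/cfs_free(); is_vmalloc_addr() is the
	 * in-kernel replacement for SPL's kmem_virt() (manual change 7) */
	if (is_vmalloc_addr(buf))
		vfree(buf);
	else
		kfree(buf);
}

The same kmalloc-or-vmalloc split is what LIBCFS_ALLOC_GFP()/LIBCFS_FREE() below implement once their CFS_ALLOC_* masks become GFP_* masks.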
diff --git a/libcfs/include/libcfs/libcfs_prim.h b/libcfs/include/libcfs/libcfs_prim.h index 4a6588e..1f066e9 100644 --- a/libcfs/include/libcfs/libcfs_prim.h +++ b/libcfs/include/libcfs/libcfs_prim.h @@ -90,31 +90,21 @@ cfs_time_t cfs_timer_deadline(cfs_timer_t *t); /* * Memory */ -#ifndef cfs_memory_pressure_get -#define cfs_memory_pressure_get() (0) -#endif -#ifndef cfs_memory_pressure_set -#define cfs_memory_pressure_set() do {} while (0) -#endif -#ifndef cfs_memory_pressure_clr -#define cfs_memory_pressure_clr() do {} while (0) -#endif - static inline int cfs_memory_pressure_get_and_set(void) { - int old = cfs_memory_pressure_get(); + int old = memory_pressure_get(); - if (!old) - cfs_memory_pressure_set(); - return old; + if (!old) + memory_pressure_set(); + return old; } static inline void cfs_memory_pressure_restore(int old) { - if (old) - cfs_memory_pressure_set(); - else - cfs_memory_pressure_clr(); - return; + if (old) + memory_pressure_set(); + else + memory_pressure_clr(); + return; } #endif diff --git a/libcfs/include/libcfs/libcfs_private.h b/libcfs/include/libcfs/libcfs_private.h index 921431a..bf75401 100644 --- a/libcfs/include/libcfs/libcfs_private.h +++ b/libcfs/include/libcfs/libcfs_private.h @@ -154,14 +154,14 @@ do { \ #endif /* LIBCFS_DEBUG */ #ifndef LIBCFS_VMALLOC_SIZE -#define LIBCFS_VMALLOC_SIZE (2 << CFS_PAGE_SHIFT) /* 2 pages */ +#define LIBCFS_VMALLOC_SIZE (2 << PAGE_CACHE_SHIFT) /* 2 pages */ #endif #define LIBCFS_ALLOC_PRE(size, mask) \ do { \ LASSERT(!cfs_in_interrupt() || \ ((size) <= LIBCFS_VMALLOC_SIZE && \ - ((mask) & CFS_ALLOC_ATOMIC)) != 0); \ + ((mask) & GFP_ATOMIC)) != 0); \ } while (0) #define LIBCFS_ALLOC_POST(ptr, size) \ @@ -186,7 +186,7 @@ do { \ do { \ LIBCFS_ALLOC_PRE((size), (mask)); \ (ptr) = (size) <= LIBCFS_VMALLOC_SIZE ? 
\ - cfs_alloc((size), (mask)) : cfs_alloc_large(size); \ + kmalloc((size), (mask)) : vmalloc(size); \ LIBCFS_ALLOC_POST((ptr), (size)); \ } while (0) @@ -194,13 +194,13 @@ do { \ * default allocator */ #define LIBCFS_ALLOC(ptr, size) \ - LIBCFS_ALLOC_GFP(ptr, size, CFS_ALLOC_IO) + LIBCFS_ALLOC_GFP(ptr, size, __GFP_IO) /** * non-sleeping allocator */ #define LIBCFS_ALLOC_ATOMIC(ptr, size) \ - LIBCFS_ALLOC_GFP(ptr, size, CFS_ALLOC_ATOMIC) + LIBCFS_ALLOC_GFP(ptr, size, GFP_ATOMIC) /** * allocate memory for specified CPU partition @@ -218,23 +218,23 @@ do { \ /** default numa allocator */ #define LIBCFS_CPT_ALLOC(ptr, cptab, cpt, size) \ - LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, CFS_ALLOC_IO) + LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, __GFP_IO) -#define LIBCFS_FREE(ptr, size) \ -do { \ - int s = (size); \ - if (unlikely((ptr) == NULL)) { \ - CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at " \ - "%s:%d\n", s, __FILE__, __LINE__); \ - break; \ - } \ - libcfs_kmem_dec((ptr), s); \ - CDEBUG(D_MALLOC, "kfreed '" #ptr "': %d at %p (tot %d).\n", \ +#define LIBCFS_FREE(ptr, size) \ +do { \ + int s = (size); \ + if (unlikely((ptr) == NULL)) { \ + CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at " \ + "%s:%d\n", s, __FILE__, __LINE__); \ + break; \ + } \ + libcfs_kmem_dec((ptr), s); \ + CDEBUG(D_MALLOC, "kfreed '" #ptr "': %d at %p (tot %d).\n", \ s, (ptr), libcfs_kmem_read()); \ - if (unlikely(s > LIBCFS_VMALLOC_SIZE)) \ - cfs_free_large(ptr); \ - else \ - cfs_free(ptr); \ + if (unlikely(s > LIBCFS_VMALLOC_SIZE)) \ + vfree(ptr); \ + else \ + kfree(ptr); \ } while (0) /******************************************************************************/ @@ -586,8 +586,8 @@ int cfs_match_nid(lnet_nid_t nid, cfs_list_t *list); struct libcfs_device_userstate { - int ldu_memhog_pages; - cfs_page_t *ldu_memhog_root_page; + int ldu_memhog_pages; + struct page *ldu_memhog_root_page; }; /* what used to be in portals_lib.h */ diff --git a/libcfs/include/libcfs/libcfs_string.h b/libcfs/include/libcfs/libcfs_string.h index 36961a0..e91396c 100644 --- a/libcfs/include/libcfs/libcfs_string.h +++ b/libcfs/include/libcfs/libcfs_string.h @@ -51,7 +51,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit), int *oldmask, int minmask, int allmask); /* Allocate space for and copy an existing string. - * Must free with cfs_free(). + * Must free with kfree(). 
*/ char *cfs_strdup(const char *str, u_int32_t flags); diff --git a/libcfs/include/libcfs/linux/kp30.h b/libcfs/include/libcfs/linux/kp30.h index 6319efa..de4f27b 100644 --- a/libcfs/include/libcfs/linux/kp30.h +++ b/libcfs/include/libcfs/linux/kp30.h @@ -216,7 +216,7 @@ extern lwt_cpu_t lwt_cpus[]; #define LWTSTR(n) #n #define LWTWHERE(f,l) f ":" LWTSTR(l) -#define LWT_EVENTS_PER_PAGE (CFS_PAGE_SIZE / sizeof (lwt_event_t)) +#define LWT_EVENTS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lwt_event_t)) #define LWT_EVENT(p1, p2, p3, p4) \ do { \ diff --git a/libcfs/include/libcfs/linux/linux-mem.h b/libcfs/include/libcfs/linux/linux-mem.h index 1d70be8..0ece47d 100644 --- a/libcfs/include/libcfs/linux/linux-mem.h +++ b/libcfs/include/libcfs/linux/linux-mem.h @@ -57,123 +57,52 @@ # include #endif -typedef struct page cfs_page_t; -#define CFS_PAGE_SIZE PAGE_CACHE_SIZE -#define CFS_PAGE_SHIFT PAGE_CACHE_SHIFT -#define CFS_PAGE_MASK (~((__u64)CFS_PAGE_SIZE-1)) +#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE-1)) -#define cfs_num_physpages num_physpages +#define page_index(p) ((p)->index) -#define cfs_copy_from_user(to, from, n) copy_from_user(to, from, n) -#define cfs_copy_to_user(to, from, n) copy_to_user(to, from, n) -static inline void *cfs_page_address(cfs_page_t *page) -{ - /* - * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) - * from here: this will lead to infinite recursion. - */ - return page_address(page); -} - -static inline void *cfs_kmap(cfs_page_t *page) -{ - return kmap(page); -} - -static inline void cfs_kunmap(cfs_page_t *page) -{ - kunmap(page); -} - -static inline void cfs_get_page(cfs_page_t *page) -{ - get_page(page); -} - -static inline int cfs_page_count(cfs_page_t *page) -{ - return page_count(page); -} - -#define cfs_page_index(p) ((p)->index) - -#define cfs_page_pin(page) page_cache_get(page) -#define cfs_page_unpin(page) page_cache_release(page) - -/* - * Memory allocator - * XXX Liang: move these declare to public file - */ -extern void *cfs_alloc(size_t nr_bytes, u_int32_t flags); -extern void cfs_free(void *addr); - -extern void *cfs_alloc_large(size_t nr_bytes); -extern void cfs_free_large(void *addr); - -extern cfs_page_t *cfs_alloc_page(unsigned int flags); -extern void cfs_free_page(cfs_page_t *page); - -#define cfs_memory_pressure_get() (current->flags & PF_MEMALLOC) -#define cfs_memory_pressure_set() do { current->flags |= PF_MEMALLOC; } while (0) -#define cfs_memory_pressure_clr() do { current->flags &= ~PF_MEMALLOC; } while (0) +#define memory_pressure_get() (current->flags & PF_MEMALLOC) +#define memory_pressure_set() do { current->flags |= PF_MEMALLOC; } while (0) +#define memory_pressure_clr() do { current->flags &= ~PF_MEMALLOC; } while (0) #if BITS_PER_LONG == 32 /* limit to lowmem on 32-bit systems */ -#define CFS_NUM_CACHEPAGES \ - min(cfs_num_physpages, 1UL << (30 - CFS_PAGE_SHIFT) * 3 / 4) +#define NUM_CACHEPAGES \ + min(num_physpages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4) #else -#define CFS_NUM_CACHEPAGES cfs_num_physpages +#define NUM_CACHEPAGES num_physpages #endif /* * In Linux there is no way to determine whether current execution context is * blockable. 
*/ -#define CFS_ALLOC_ATOMIC_TRY CFS_ALLOC_ATOMIC +#define ALLOC_ATOMIC_TRY GFP_ATOMIC +/* GFP_IOFS was added in 2.6.33 kernel */ +#ifndef GFP_IOFS +#define GFP_IOFS (__GFP_IO | __GFP_FS) +#endif -/* - * SLAB allocator - * XXX Liang: move these declare to public file - */ -typedef struct kmem_cache cfs_mem_cache_t; -extern cfs_mem_cache_t * cfs_mem_cache_create (const char *, size_t, size_t, unsigned long); -extern int cfs_mem_cache_destroy ( cfs_mem_cache_t * ); -extern void *cfs_mem_cache_alloc ( cfs_mem_cache_t *, int); -extern void cfs_mem_cache_free ( cfs_mem_cache_t *, void *); -extern int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem); - -#define CFS_DECL_MMSPACE mm_segment_t __oldfs -#define CFS_MMSPACE_OPEN \ +#define DECL_MMSPACE mm_segment_t __oldfs +#define MMSPACE_OPEN \ do { __oldfs = get_fs(); set_fs(get_ds());} while(0) -#define CFS_MMSPACE_CLOSE set_fs(__oldfs) +#define MMSPACE_CLOSE set_fs(__oldfs) -#define CFS_SLAB_HWCACHE_ALIGN SLAB_HWCACHE_ALIGN -#define CFS_SLAB_KERNEL SLAB_KERNEL -#define CFS_SLAB_NOFS SLAB_NOFS -/* - * NUMA allocators - * - * NB: we will rename these functions in a separate patch: - * - rename cfs_alloc to cfs_malloc - * - rename cfs_alloc/free_page to cfs_page_alloc/free - * - rename cfs_alloc/free_large to cfs_vmalloc/vfree - */ extern void *cfs_cpt_malloc(struct cfs_cpt_table *cptab, int cpt, size_t nr_bytes, unsigned int flags); extern void *cfs_cpt_vmalloc(struct cfs_cpt_table *cptab, int cpt, size_t nr_bytes); -extern cfs_page_t *cfs_page_cpt_alloc(struct cfs_cpt_table *cptab, +extern struct page *cfs_page_cpt_alloc(struct cfs_cpt_table *cptab, int cpt, unsigned int flags); -extern void *cfs_mem_cache_cpt_alloc(cfs_mem_cache_t *cachep, +extern void *cfs_mem_cache_cpt_alloc(struct kmem_cache *cachep, struct cfs_cpt_table *cptab, int cpt, unsigned int flags); /* * Shrinker */ -#define cfs_shrinker shrinker #ifdef HAVE_SHRINK_CONTROL # define SHRINKER_ARGS(sc, nr_to_scan, gfp_mask) \ @@ -193,10 +122,10 @@ extern void *cfs_mem_cache_cpt_alloc(cfs_mem_cache_t *cachep, #endif #ifdef HAVE_REGISTER_SHRINKER -typedef int (*cfs_shrinker_t)(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask)); +typedef int (*shrinker_t)(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask)); static inline -struct cfs_shrinker *cfs_set_shrinker(int seek, cfs_shrinker_t func) +struct shrinker *set_shrinker(int seek, shrinker_t func) { struct shrinker *s; @@ -213,7 +142,7 @@ struct cfs_shrinker *cfs_set_shrinker(int seek, cfs_shrinker_t func) } static inline -void cfs_remove_shrinker(struct cfs_shrinker *shrinker) +void remove_shrinker(struct shrinker *shrinker) { if (shrinker == NULL) return; @@ -221,11 +150,6 @@ void cfs_remove_shrinker(struct cfs_shrinker *shrinker) unregister_shrinker(shrinker); kfree(shrinker); } -#else -typedef shrinker_t cfs_shrinker_t; -#define cfs_set_shrinker(s, f) set_shrinker(s, f) -#define cfs_remove_shrinker(s) remove_shrinker(s) #endif -#define CFS_DEFAULT_SEEKS DEFAULT_SEEKS #endif /* __LINUX_CFS_MEM_H__ */ diff --git a/libcfs/include/libcfs/posix/libcfs.h b/libcfs/include/libcfs/posix/libcfs.h index 305b74b..4a11e71 100644 --- a/libcfs/include/libcfs/posix/libcfs.h +++ b/libcfs/include/libcfs/posix/libcfs.h @@ -277,21 +277,21 @@ static inline int cfs_module_refcount(cfs_module_t *m) * ***************************************************************************/ -struct cfs_shrinker { +struct shrinker { ; }; -#define CFS_DEFAULT_SEEKS (0) +#define DEFAULT_SEEKS (0) -typedef int (*cfs_shrinker_t)(int, unsigned int); +typedef int (*shrinker_t)(int, 
unsigned int); static inline -struct cfs_shrinker *cfs_set_shrinker(int seeks, cfs_shrinker_t shrink) +struct shrinker *set_shrinker(int seeks, shrinker_t shrink) { - return (struct cfs_shrinker *)0xdeadbea1; // Cannot return NULL here + return (struct shrinker *)0xdeadbea1; /* Cannot return NULL here */ } -static inline void cfs_remove_shrinker(struct cfs_shrinker *shrinker) +static inline void remove_shrinker(struct shrinker *shrinker) { } diff --git a/libcfs/include/libcfs/user-mem.h b/libcfs/include/libcfs/user-mem.h index e48b124..8ab9c25 100644 --- a/libcfs/include/libcfs/user-mem.h +++ b/libcfs/include/libcfs/user-mem.h @@ -37,7 +37,7 @@ */ #define LIBLUSTRE_HANDLE_UNALIGNED_PAGE -typedef struct page { +struct page { void *addr; unsigned long index; cfs_list_t list; @@ -50,92 +50,129 @@ typedef struct page { int _managed; #endif cfs_list_t _node; -} cfs_page_t; +}; /* 4K */ -#define CFS_PAGE_SHIFT 12 -#define CFS_PAGE_SIZE (1UL << CFS_PAGE_SHIFT) -#define CFS_PAGE_MASK (~((__u64)CFS_PAGE_SIZE-1)) - -cfs_page_t *cfs_alloc_page(unsigned int flags); -void cfs_free_page(cfs_page_t *pg); -void *cfs_page_address(cfs_page_t *pg); -void *cfs_kmap(cfs_page_t *pg); -void cfs_kunmap(cfs_page_t *pg); - -#define cfs_get_page(p) __I_should_not_be_called__(at_all) -#define cfs_page_count(p) __I_should_not_be_called__(at_all) -#define cfs_page_index(p) ((p)->index) -#define cfs_page_pin(page) do {} while (0) -#define cfs_page_unpin(page) do {} while (0) +#define PAGE_CACHE_SHIFT 12 +#define PAGE_CACHE_SIZE (1UL << PAGE_CACHE_SHIFT) +#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE-1)) + +struct page *alloc_page(unsigned int flags); +void __free_page(struct page *pg); +void *page_address(struct page *pg); +void *kmap(struct page *pg); +void kunmap(struct page *pg); + +#define get_page(p) __I_should_not_be_called__(at_all) +#define page_count(p) __I_should_not_be_called__(at_all) +#define page_index(p) ((p)->index) +#define page_cache_get(page) do { } while (0) +#define page_cache_release(page) do { } while (0) /* * Memory allocator * Inline function, so utils can use them without linking of libcfs */ -#define __ALLOC_ZERO (1 << 2) -static inline void *cfs_alloc(size_t nr_bytes, u_int32_t flags) + +/* + * Universal memory allocator API + */ +enum cfs_alloc_flags { + /* allocation is not allowed to block */ + GFP_ATOMIC = 0x1, + /* allocation is allowed to block */ + __GFP_WAIT = 0x2, + /* allocation should return zeroed memory */ + __GFP_ZERO = 0x4, + /* allocation is allowed to call file-system code to free/clean + * memory */ + __GFP_FS = 0x8, + /* allocation is allowed to do io to free/clean memory */ + __GFP_IO = 0x10, + /* don't report allocation failure to the console */ + __GFP_NOWARN = 0x20, + /* standard allocator flag combination */ + GFP_IOFS = __GFP_FS | __GFP_IO, + GFP_USER = __GFP_WAIT | __GFP_FS | __GFP_IO, + GFP_NOFS = __GFP_WAIT | __GFP_IO, + GFP_KERNEL = __GFP_WAIT | __GFP_IO | __GFP_FS, +}; + +/* flags for cfs_page_alloc() in addition to enum cfs_alloc_flags */ +enum cfs_alloc_page_flags { + /* allow to return page beyond KVM. It has to be mapped into KVM by + * kmap() and unmapped with kunmap(). 
*/
+	__GFP_HIGHMEM	= 0x40,
+	GFP_HIGHUSER	= __GFP_WAIT | __GFP_FS | __GFP_IO |
+			  __GFP_HIGHMEM,
+};
+
+static inline void *kmalloc(size_t nr_bytes, u_int32_t flags)
 {
-        void *result;
+	void *result;

-        result = malloc(nr_bytes);
-        if (result != NULL && (flags & __ALLOC_ZERO))
-                memset(result, 0, nr_bytes);
-        return result;
+	result = malloc(nr_bytes);
+	if (result != NULL && (flags & __GFP_ZERO))
+		memset(result, 0, nr_bytes);
+	return result;
 }

-#define cfs_free(addr)			free(addr)
-#define cfs_alloc_large(nr_bytes)	cfs_alloc(nr_bytes, 0)
-#define cfs_free_large(addr)		cfs_free(addr)
+#define kfree(addr)		free(addr)
+#define vmalloc(nr_bytes)	kmalloc(nr_bytes, 0)
+#define vfree(addr)		free(addr)

-#define CFS_ALLOC_ATOMIC_TRY	(0)
+#define ALLOC_ATOMIC_TRY	(0)
 /*
  * SLAB allocator
  */
-typedef struct {
+struct kmem_cache {
         int size;
-} cfs_mem_cache_t;
+};

-#define CFS_SLAB_HWCACHE_ALIGN	0
+#define SLAB_HWCACHE_ALIGN	0
 #define SLAB_DESTROY_BY_RCU	0
-#define CFS_SLAB_KERNEL		0
-#define CFS_SLAB_NOFS		0
+#define SLAB_KERNEL		0
+#define SLAB_NOFS		0
+
+#define memory_pressure_get() (0)
+#define memory_pressure_set() do {} while (0)
+#define memory_pressure_clr() do {} while (0)

-cfs_mem_cache_t *
-cfs_mem_cache_create(const char *, size_t, size_t, unsigned long);
-int cfs_mem_cache_destroy(cfs_mem_cache_t *c);
-void *cfs_mem_cache_alloc(cfs_mem_cache_t *c, int gfp);
-void cfs_mem_cache_free(cfs_mem_cache_t *c, void *addr);
-int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem);
+struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+				     unsigned long, void *);
+void kmem_cache_destroy(struct kmem_cache *c);
+void *kmem_cache_alloc(struct kmem_cache *c, int gfp);
+void kmem_cache_free(struct kmem_cache *c, void *addr);
+int kmem_is_in_cache(const void *addr, const struct kmem_cache *kmem);

 /*
  * NUMA allocators
  */
 #define cfs_cpt_malloc(cptab, cpt, bytes, flags)	\
-	cfs_alloc(bytes, flags)
+	kmalloc(bytes, flags)
 #define cfs_cpt_vmalloc(cptab, cpt, bytes)		\
-	cfs_alloc(bytes)
+	vmalloc(bytes)
 #define cfs_page_cpt_alloc(cptab, cpt, mask)		\
-	cfs_alloc_page(mask)
+	alloc_page(mask)
 #define cfs_mem_cache_cpt_alloc(cache, cptab, cpt, gfp)	\
-	cfs_mem_cache_alloc(cache, gfp)
+	kmem_cache_alloc(cache, gfp)

 #define smp_rmb()	do {} while (0)

 /*
  * Copy to/from user
  */
-static inline int cfs_copy_from_user(void *a,void *b, int c)
+static inline int copy_from_user(void *a, void *b, int c)
 {
-        memcpy(a,b,c);
-        return 0;
+	memcpy(a, b, c);
+	return 0;
 }

-static inline int cfs_copy_to_user(void *a,void *b, int c)
+static inline int copy_to_user(void *a, void *b, int c)
 {
-        memcpy(a,b,c);
-        return 0;
+	memcpy(a, b, c);
+	return 0;
 }

 #endif
diff --git a/libcfs/include/libcfs/winnt/portals_utils.h b/libcfs/include/libcfs/winnt/portals_utils.h
index e5fc164..927f515 100644
--- a/libcfs/include/libcfs/winnt/portals_utils.h
+++ b/libcfs/include/libcfs/winnt/portals_utils.h
@@ -265,16 +265,16 @@ static inline void read_random(char *buf, int len)
 	((unsigned char *)&addr)[1],	\
 	((unsigned char *)&addr)[0]

-static int cfs_copy_from_user(void *to, void *from, int c)
+static int copy_from_user(void *to, void *from, int c)
 {
-        memcpy(to, from, c);
-        return 0;
+	memcpy(to, from, c);
+	return 0;
 }

-static int cfs_copy_to_user(void *to, const void *from, int c)
+static int copy_to_user(void *to, const void *from, int c)
 {
-        memcpy(to, from, c);
-        return 0;
+	memcpy(to, from, c);
+	return 0;
 }

 static unsigned long
@@ -297,8 +297,8 @@ clear_user(void __user *to, unsigned long n)
 	0 \
 )

-#define cfs_num_physpages	(64
* 1024) -#define CFS_NUM_CACHEPAGES cfs_num_physpages +#define num_physpages (64 * 1024) +#define NUM_CACHEPAGES num_physpages #else diff --git a/libcfs/include/libcfs/winnt/winnt-mem.h b/libcfs/include/libcfs/winnt/winnt-mem.h index 2435915..e13dd59 100644 --- a/libcfs/include/libcfs/winnt/winnt-mem.h +++ b/libcfs/include/libcfs/winnt/winnt-mem.h @@ -49,24 +49,26 @@ #ifdef __KERNEL__ -typedef struct cfs_mem_cache cfs_mem_cache_t; - /* * page definitions */ -#define CFS_PAGE_SIZE PAGE_SIZE -#define CFS_PAGE_SHIFT PAGE_SHIFT +#define PAGE_CACHE_SIZE PAGE_SIZE +#define PAGE_CACHE_SHIFT PAGE_SHIFT #define CFS_PAGE_MASK (~(PAGE_SIZE - 1)) -typedef struct cfs_page { +#define memory_pressure_get() (0) +#define memory_pressure_set() do {} while (0) +#define memory_pressure_clr() do {} while (0) + +struct page { void * addr; cfs_atomic_t count; void * private; void * mapping; __u32 index; __u32 flags; -} cfs_page_t; +}; #define page cfs_page @@ -146,90 +148,115 @@ typedef struct cfs_page { #define TestClearPageWriteback(page) test_and_clear_bit(PG_writeback, \ &(page)->flags) -#define __GFP_FS (1) -#define GFP_KERNEL (2) -#define GFP_ATOMIC (4) +/* + * Universal memory allocator API + */ +enum cfs_alloc_flags { + /* allocation is not allowed to block */ + GFP_ATOMIC = 0x1, + /* allocation is allowed to block */ + __GFP_WAIT = 0x2, + /* allocation should return zeroed memory */ + __GFP_ZERO = 0x4, + /* allocation is allowed to call file-system code to free/clean + * memory */ + __GFP_FS = 0x8, + /* allocation is allowed to do io to free/clean memory */ + __GFP_IO = 0x10, + /* don't report allocation failure to the console */ + __GFP_NOWARN = 0x20, + /* standard allocator flag combination */ + GFP_IOFS = __GFP_FS | __GFP_IO, + GFP_USER = __GFP_WAIT | __GFP_FS | __GFP_IO, + GFP_NOFS = __GFP_WAIT | __GFP_IO, + GFP_KERNEL = __GFP_WAIT | __GFP_IO | __GFP_FS, +}; + +/* flags for cfs_page_alloc() in addition to enum cfs_alloc_flags */ +enum cfs_alloc_page_flags { + /* allow to return page beyond KVM. It has to be mapped into KVM by + * kmap() and unmapped with kunmap(). 
*/
+	__GFP_HIGHMEM	= 0x40,
+	GFP_HIGHUSER	= __GFP_WAIT | __GFP_FS | __GFP_IO |
+			  __GFP_HIGHMEM,
+};

-cfs_page_t *cfs_alloc_page(int flags);
-void cfs_free_page(cfs_page_t *pg);
-void cfs_release_page(cfs_page_t *pg);
-cfs_page_t * virt_to_page(void * addr);
-int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem);
+struct page *alloc_page(int flags);
+void __free_page(struct page *pg);
+void cfs_release_page(struct page *pg);
+struct page *virt_to_page(void *addr);

 #define page_cache_get(a)	do {} while (0)
 #define page_cache_release(a)	do {} while (0)

-static inline void *cfs_page_address(cfs_page_t *page)
+static inline void *page_address(struct page *page)
 {
	return page->addr;
 }

-static inline void *cfs_kmap(cfs_page_t *page)
+static inline void *kmap(struct page *page)
 {
	return page->addr;
 }

-static inline void cfs_kunmap(cfs_page_t *page)
+static inline void kunmap(struct page *page)
 {
	return;
 }

-static inline void cfs_get_page(cfs_page_t *page)
+static inline void get_page(struct page *page)
 {
	cfs_atomic_inc(&page->count);
 }

-static inline void cfs_put_page(cfs_page_t *page)
+static inline void cfs_put_page(struct page *page)
 {
	cfs_atomic_dec(&page->count);
 }

-static inline int cfs_page_count(cfs_page_t *page)
+static inline int page_count(struct page *page)
 {
	return cfs_atomic_read(&page->count);
 }

-#define cfs_page_index(p)	((p)->index)
+#define page_index(p)	((p)->index)

 /*
  * Memory allocator
  */
-#define CFS_ALLOC_ATOMIC_TRY	(0)
-extern void *cfs_alloc(size_t nr_bytes, u_int32_t flags);
-extern void cfs_free(void *addr);
-
-#define kmalloc cfs_alloc
-
-extern void *cfs_alloc_large(size_t nr_bytes);
-extern void cfs_free_large(void *addr);
+#define ALLOC_ATOMIC_TRY	(0)
+extern void *kmalloc(size_t nr_bytes, u_int32_t flags);
+extern void kfree(void *addr);
+extern void *vmalloc(size_t nr_bytes);
+extern void vfree(void *addr);

 /*
  * SLAB allocator
  */
-#define CFS_SLAB_HWCACHE_ALIGN	0
+#define SLAB_HWCACHE_ALIGN	0

 /* The cache name is limited to 20 chars */
-struct cfs_mem_cache {
+struct kmem_cache {
	char			name[20];
	ulong_ptr_t		flags;
	NPAGED_LOOKASIDE_LIST	npll;
 };

-extern cfs_mem_cache_t *cfs_mem_cache_create (const char *, size_t, size_t,
-                                              unsigned long);
-extern int cfs_mem_cache_destroy (cfs_mem_cache_t * );
-extern void *cfs_mem_cache_alloc (cfs_mem_cache_t *, int);
-extern void cfs_mem_cache_free (cfs_mem_cache_t *, void *);
+extern struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+					    unsigned long, void *);
+extern void kmem_cache_destroy(struct kmem_cache *);
+extern void *kmem_cache_alloc(struct kmem_cache *, int);
+extern void kmem_cache_free(struct kmem_cache *, void *);

 /*
  * shrinker
  */
 typedef int (*shrink_callback)(int nr_to_scan, gfp_t gfp_mask);
-struct cfs_shrinker {
+struct shrinker {
	shrink_callback cb;
	int seeks;	/* seeks to recreate an obj */
@@ -238,8 +265,8 @@ struct cfs_shrinker {
	long nr;	/* objs pending delete */
 };

-struct cfs_shrinker *cfs_set_shrinker(int seeks, shrink_callback cb);
-void cfs_remove_shrinker(struct cfs_shrinker *s);
+struct shrinker *set_shrinker(int seeks, shrink_callback cb);
+void remove_shrinker(struct shrinker *s);

 int start_shrinker_timer();
 void stop_shrinker_timer();
@@ -248,13 +275,13 @@ void stop_shrinker_timer();
  * Page allocator slabs
  */

-extern cfs_mem_cache_t *cfs_page_t_slab;
-extern cfs_mem_cache_t *cfs_page_p_slab;
+extern struct kmem_cache *cfs_page_t_slab;
+extern struct kmem_cache *cfs_page_p_slab;

-#define CFS_DECL_MMSPACE
-#define CFS_MMSPACE_OPEN	do {} while(0)
-#define CFS_MMSPACE_CLOSE	do
{} while(0) +#define DECL_MMSPACE +#define MMSPACE_OPEN do {} while (0) +#define MMSPACE_CLOSE do {} while (0) #define cfs_mb() do {} while(0) @@ -265,7 +292,7 @@ extern cfs_mem_cache_t *cfs_page_p_slab; * MM defintions from (linux/mm.h) */ -#define CFS_DEFAULT_SEEKS 2 /* shrink seek */ +#define DEFAULT_SEEKS 2 /* shrink seek */ #else /* !__KERNEL__ */ diff --git a/libcfs/include/libcfs/winnt/winnt-prim.h b/libcfs/include/libcfs/winnt/winnt-prim.h index e234bba..1f98fd5 100644 --- a/libcfs/include/libcfs/winnt/winnt-prim.h +++ b/libcfs/include/libcfs/winnt/winnt-prim.h @@ -485,20 +485,21 @@ static __inline cfs_group_info_t *cfs_groups_alloc(int gidsetsize) { cfs_group_info_t * groupinfo; KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__, __FUNCTION__)); - groupinfo = - (cfs_group_info_t *)cfs_alloc(sizeof(cfs_group_info_t), 0); + groupinfo = kmalloc(sizeof(cfs_group_info_t), 0); if (groupinfo) { memset(groupinfo, 0, sizeof(cfs_group_info_t)); } return groupinfo; } + static __inline void cfs_groups_free(cfs_group_info_t *group_info) { - KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__, - __FUNCTION__)); - cfs_free(group_info); + KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__, + __FUNCTION__)); + kfree(group_info); } + static __inline int cfs_set_current_groups(cfs_group_info_t *group_info) { @@ -506,6 +507,7 @@ cfs_set_current_groups(cfs_group_info_t *group_info) __FUNCTION__)); return 0; } + static __inline int groups_search(cfs_group_info_t *group_info, gid_t grp) { KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__, @@ -597,7 +599,7 @@ typedef struct _TASK_MAN { spinlock_t Lock; /* Protection lock */ - cfs_mem_cache_t *slab; /* Memory slab for task slot */ + struct kmem_cache *slab; /* Memory slab for task slot */ ULONG NumOfTasks; /* Total tasks (threads) */ LIST_ENTRY TaskList; /* List of task slots */ diff --git a/libcfs/include/libcfs/winnt/winnt-tcpip.h b/libcfs/include/libcfs/winnt/winnt-tcpip.h index 74c4c74..85c5695 100644 --- a/libcfs/include/libcfs/winnt/winnt-tcpip.h +++ b/libcfs/include/libcfs/winnt/winnt-tcpip.h @@ -639,14 +639,14 @@ typedef struct { int ksnd_ntconns; /* number of tconns in list */ cfs_list_t ksnd_tconns; /* tdi connections list */ - cfs_mem_cache_t *ksnd_tconn_slab; /* ks_tconn_t allocation slabs*/ + struct kmem_cache *ksnd_tconn_slab; /* ks_tconn_t allocation slabs*/ event_t ksnd_tconn_exit; /* event signal by last tconn */ spinlock_t ksnd_tsdu_lock; /* tsdu access serialise */ int ksnd_ntsdus; /* number of tsdu buffers allocated */ ulong ksnd_tsdu_size; /* the size of a signel tsdu buffer */ - cfs_mem_cache_t *ksnd_tsdu_slab; /* slab cache for tsdu buffer allocation */ + struct kmem_cache *ksnd_tsdu_slab; /* slab cache for tsdu buffer allocation */ int ksnd_nfreetsdus; /* number of tsdu buffers in the freed list */ cfs_list_t ksnd_freetsdus; /* List of the freed Tsdu buffer. 
*/
diff --git a/libcfs/libcfs/darwin/darwin-mem.c b/libcfs/libcfs/darwin/darwin-mem.c
index 6d57c25..333e010 100644
--- a/libcfs/libcfs/darwin/darwin-mem.c
+++ b/libcfs/libcfs/darwin/darwin-mem.c
@@ -61,9 +61,9 @@ struct cfs_zone_nob {
 static struct cfs_zone_nob	cfs_zone_nob;
 static spinlock_t		cfs_zone_guard;

-cfs_mem_cache_t *mem_cache_find(const char *name, size_t objsize)
+struct kmem_cache *mem_cache_find(const char *name, size_t objsize)
 {
-	cfs_mem_cache_t		*walker = NULL;
+	struct kmem_cache	*walker = NULL;

	LASSERT(cfs_zone_nob.z_nob != NULL);
@@ -85,12 +85,12 @@ cfs_mem_cache_t *mem_cache_find(const char *name, size_t objsize)
 * survives kext unloading, so that @name cannot be just static string
 * embedded into kext image.
 */
-cfs_mem_cache_t *mem_cache_create(vm_size_t objsize, const char *name)
+struct kmem_cache *mem_cache_create(vm_size_t objsize, const char *name)
 {
-	cfs_mem_cache_t	*mc = NULL;
+	struct kmem_cache	*mc = NULL;
	char *cname;

-	MALLOC(mc, cfs_mem_cache_t *, sizeof(cfs_mem_cache_t), M_TEMP, M_WAITOK|M_ZERO);
+	MALLOC(mc, struct kmem_cache *, sizeof(struct kmem_cache), M_TEMP, M_WAITOK|M_ZERO);
	if (mc == NULL){
		CERROR("cfs_mem_cache created fail!\n");
		return NULL;
@@ -105,7 +105,7 @@ cfs_mem_cache_t *mem_cache_create(vm_size_t objsize, const char *name)
	return mc;
 }

-void mem_cache_destroy(cfs_mem_cache_t *mc)
+void mem_cache_destroy(struct kmem_cache *mc)
 {
	/*
	 * zone can NOT be destroyed after creating,
@@ -128,17 +128,17 @@
 #else  /* !CFS_INDIVIDUAL_ZONE */

-cfs_mem_cache_t *
+struct kmem_cache *
 mem_cache_find(const char *name, size_t objsize)
 {
	return NULL;
 }

-cfs_mem_cache_t *mem_cache_create(vm_size_t size, const char *name)
+struct kmem_cache *mem_cache_create(vm_size_t size, const char *name)
 {
-	cfs_mem_cache_t *mc = NULL;
+	struct kmem_cache *mc = NULL;

-	MALLOC(mc, cfs_mem_cache_t *, sizeof(cfs_mem_cache_t), M_TEMP, M_WAITOK|M_ZERO);
+	MALLOC(mc, struct kmem_cache *, sizeof(struct kmem_cache), M_TEMP, M_WAITOK|M_ZERO);
	if (mc == NULL){
		CERROR("cfs_mem_cache created fail!\n");
		return NULL;
@@ -148,7 +148,7 @@ cfs_mem_cache_t *mem_cache_create(vm_size_t size, const char *name)
	return mc;
 }

-void mem_cache_destroy(cfs_mem_cache_t *mc)
+void mem_cache_destroy(struct kmem_cache *mc)
 {
	OSMalloc_Tagfree(mc->mc_cache);
	FREE(mc, M_TEMP);
@@ -160,45 +160,44 @@
 #endif /* !CFS_INDIVIDUAL_ZONE */

-cfs_mem_cache_t *
-cfs_mem_cache_create (const char *name,
-                      size_t objsize, size_t off, unsigned long arg1)
+struct kmem_cache *
+kmem_cache_create(const char *name, size_t objsize, size_t off,
+		  unsigned long arg1, void *ctor)
 {
-	cfs_mem_cache_t	*mc;
+	struct kmem_cache	*mc;

-	mc = mem_cache_find(name, objsize);
-	if (mc)
-		return mc;
-	mc = mem_cache_create(objsize, name);
+	mc = mem_cache_find(name, objsize);
+	if (mc)
+		return mc;
+	mc = mem_cache_create(objsize, name);
	return mc;
 }

-int cfs_mem_cache_destroy (cfs_mem_cache_t *cachep)
+void kmem_cache_destroy(struct kmem_cache *cachep)
 {
-	mem_cache_destroy(cachep);
-	return 0;
+	mem_cache_destroy(cachep);
 }

-void *cfs_mem_cache_alloc (cfs_mem_cache_t *cachep, int flags)
+void *kmem_cache_alloc (struct kmem_cache *cachep, int flags)
 {
-	void *result;
+	void *result;

-	/* zalloc_canblock() is not exported... Emulate it.
*/ - if (flags & CFS_ALLOC_ATOMIC) { - result = (void *)mem_cache_alloc_nb(cachep); - } else { - LASSERT(get_preemption_level() == 0); - result = (void *)mem_cache_alloc(cachep); - } - if (result != NULL && (flags & CFS_ALLOC_ZERO)) - memset(result, 0, cachep->mc_size); + /* zalloc_canblock() is not exported... Emulate it. */ + if (flags & GFP_ATOMIC) { + result = (void *)mem_cache_alloc_nb(cachep); + } else { + LASSERT(get_preemption_level() == 0); + result = (void *)mem_cache_alloc(cachep); + } + if (result != NULL && (flags & __GFP_ZERO)) + memset(result, 0, cachep->mc_size); - return result; + return result; } -void cfs_mem_cache_free (cfs_mem_cache_t *cachep, void *objp) +void kmem_cache_free (struct kmem_cache *cachep, void *objp) { - mem_cache_free(cachep, objp); + mem_cache_free(cachep, objp); } /* --------------------------------------------------------------------------- @@ -210,8 +210,8 @@ void cfs_mem_cache_free (cfs_mem_cache_t *cachep, void *objp) * "Raw" pages */ -static unsigned int raw_pages = 0; -static cfs_mem_cache_t *raw_page_cache = NULL; +static unsigned int raw_pages; +static struct kmem_cache *raw_page_cache; static struct xnu_page_ops raw_page_ops; static struct xnu_page_ops *page_ops[XNU_PAGE_NTYPES] = { @@ -219,35 +219,35 @@ static struct xnu_page_ops *page_ops[XNU_PAGE_NTYPES] = { }; #if defined(LIBCFS_DEBUG) -static int page_type_is_valid(cfs_page_t *page) +static int page_type_is_valid(struct page *page) { - LASSERT(page != NULL); - return 0 <= page->type && page->type < XNU_PAGE_NTYPES; + LASSERT(page != NULL); + return 0 <= page->type && page->type < XNU_PAGE_NTYPES; } -static int page_is_raw(cfs_page_t *page) +static int page_is_raw(struct page *page) { - return page->type == XNU_PAGE_RAW; + return page->type == XNU_PAGE_RAW; } #endif -static struct xnu_raw_page *as_raw(cfs_page_t *page) +static struct xnu_raw_page *as_raw(struct page *page) { - LASSERT(page_is_raw(page)); - return list_entry(page, struct xnu_raw_page, header); + LASSERT(page_is_raw(page)); + return list_entry(page, struct xnu_raw_page, header); } -static void *raw_page_address(cfs_page_t *pg) +static void *raw_page_address(struct page *pg) { - return (void *)as_raw(pg)->virtual; + return (void *)as_raw(pg)->virtual; } -static void *raw_page_map(cfs_page_t *pg) +static void *raw_page_map(struct page *pg) { - return (void *)as_raw(pg)->virtual; + return (void *)as_raw(pg)->virtual; } -static void raw_page_unmap(cfs_page_t *pg) +static void raw_page_unmap(struct page *pg) { } @@ -264,10 +264,10 @@ spinlock_t page_death_row_phylax; static void raw_page_finish(struct xnu_raw_page *pg) { - -- raw_pages; - if (pg->virtual != NULL) - cfs_mem_cache_free(raw_page_cache, pg->virtual); - cfs_free(pg); + --raw_pages; + if (pg->virtual != NULL) + kmem_cache_free(raw_page_cache, pg->virtual); + kfree(pg); } void raw_page_death_row_clean(void) @@ -294,7 +294,7 @@ void free_raw_page(struct xnu_raw_page *pg) /* * kmem_free()->vm_map_remove()->vm_map_delete()->lock_write() may * block. (raw_page_done()->upl_abort() can block too) On the other - * hand, cfs_free_page() may be called in non-blockable context. To + * hand, __free_page() may be called in non-blockable context. To * work around this, park pages on global list when cannot block. 
*/ if (get_preemption_level() > 0) { @@ -307,74 +307,74 @@ void free_raw_page(struct xnu_raw_page *pg) } } -cfs_page_t *cfs_alloc_page(u_int32_t flags) +struct page *alloc_page(u_int32_t flags) { - struct xnu_raw_page *page; + struct xnu_raw_page *page; - /* - * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT) - * from here: this will lead to infinite recursion. - */ + /* + * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT) + * from here: this will lead to infinite recursion. + */ - page = cfs_alloc(sizeof *page, flags); - if (page != NULL) { - page->virtual = cfs_mem_cache_alloc(raw_page_cache, flags); - if (page->virtual != NULL) { - ++ raw_pages; - page->header.type = XNU_PAGE_RAW; - atomic_set(&page->count, 1); - } else { - cfs_free(page); - page = NULL; - } - } - return page != NULL ? &page->header : NULL; + page = kmalloc(sizeof *page, flags); + if (page != NULL) { + page->virtual = kmem_cache_alloc(raw_page_cache, flags); + if (page->virtual != NULL) { + ++raw_pages; + page->header.type = XNU_PAGE_RAW; + atomic_set(&page->count, 1); + } else { + kfree(page); + page = NULL; + } + } + return page != NULL ? &page->header : NULL; } -void cfs_free_page(cfs_page_t *pages) +void __free_page(struct page *pages) { - free_raw_page(as_raw(pages)); + free_raw_page(as_raw(pages)); } -void cfs_get_page(cfs_page_t *p) +void get_page(struct page *p) { - atomic_inc(&as_raw(p)->count); + atomic_inc(&as_raw(p)->count); } -int cfs_put_page_testzero(cfs_page_t *p) +int cfs_put_page_testzero(struct page *p) { return atomic_dec_and_test(&as_raw(p)->count); } -int cfs_page_count(cfs_page_t *p) +int page_count(struct page *p) { - return atomic_read(&as_raw(p)->count); + return atomic_read(&as_raw(p)->count); } /* * Generic page operations */ -void *cfs_page_address(cfs_page_t *pg) +void *page_address(struct page *pg) { - /* - * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT) - * from here: this will lead to infinite recursion. - */ - LASSERT(page_type_is_valid(pg)); - return page_ops[pg->type]->page_address(pg); + /* + * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT) + * from here: this will lead to infinite recursion. 
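
free_raw_page() above is an instance of the park-then-reap pattern: when the current context cannot block, the object is queued under a spinlock and freed later from a context that can. A minimal sketch of the pattern, with hypothetical item/free_item names:

        static CFS_LIST_HEAD(death_row);        /* guarded by death_row_lock */
        static spinlock_t death_row_lock;       /* spin_lock_init() at module init */

        static void defer_or_free(struct item *it)
        {
                if (get_preemption_level() > 0) {
                        /* cannot block here: park the item for a later reaper */
                        spin_lock(&death_row_lock);
                        cfs_list_add(&it->it_link, &death_row);
                        spin_unlock(&death_row_lock);
                } else {
                        free_item(it);          /* may block */
                }
        }
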
+ */ + LASSERT(page_type_is_valid(pg)); + return page_ops[pg->type]->page_address(pg); } -void *cfs_kmap(cfs_page_t *pg) +void *kmap(struct page *pg) { - LASSERT(page_type_is_valid(pg)); - return page_ops[pg->type]->page_map(pg); + LASSERT(page_type_is_valid(pg)); + return page_ops[pg->type]->page_map(pg); } -void cfs_kunmap(cfs_page_t *pg) +void kunmap(struct page *pg) { - LASSERT(page_type_is_valid(pg)); - page_ops[pg->type]->page_unmap(pg); + LASSERT(page_type_is_valid(pg)); + page_ops[pg->type]->page_unmap(pg); } void xnu_page_ops_register(int type, struct xnu_page_ops *ops) @@ -403,39 +403,39 @@ extern int get_preemption_level(void); #define get_preemption_level() (0) #endif -void *cfs_alloc(size_t nr_bytes, u_int32_t flags) +void *kmalloc(size_t nr_bytes, u_int32_t flags) { - int mflags; + int mflags; - mflags = 0; - if (flags & CFS_ALLOC_ATOMIC) { - mflags |= M_NOWAIT; - } else { - LASSERT(get_preemption_level() == 0); - mflags |= M_WAITOK; - } + mflags = 0; + if (flags & GFP_ATOMIC) { + mflags |= M_NOWAIT; + } else { + LASSERT(get_preemption_level() == 0); + mflags |= M_WAITOK; + } - if (flags & CFS_ALLOC_ZERO) - mflags |= M_ZERO; + if (flags & __GFP_ZERO) + mflags |= M_ZERO; - return _MALLOC(nr_bytes, M_TEMP, mflags); + return _MALLOC(nr_bytes, M_TEMP, mflags); } -void cfs_free(void *addr) +void kfree(void *addr) { - return _FREE(addr, M_TEMP); + return _FREE(addr, M_TEMP); } -void *cfs_alloc_large(size_t nr_bytes) +void *vmalloc(size_t nr_bytes) { - LASSERT(get_preemption_level() == 0); - return _MALLOC(nr_bytes, M_TEMP, M_WAITOK); + LASSERT(get_preemption_level() == 0); + return _MALLOC(nr_bytes, M_TEMP, M_WAITOK); } -void cfs_free_large(void *addr) +void vfree(void *addr) { - LASSERT(get_preemption_level() == 0); - return _FREE(addr, M_TEMP); + LASSERT(get_preemption_level() == 0); + return _FREE(addr, M_TEMP); } /* @@ -477,7 +477,8 @@ int cfs_mem_init(void) #endif CFS_INIT_LIST_HEAD(&page_death_row); spin_lock_init(&page_death_row_phylax); - raw_page_cache = cfs_mem_cache_create("raw-page", CFS_PAGE_SIZE, 0, 0); + raw_page_cache = kmem_cache_create("raw-page", PAGE_CACHE_SIZE, + 0, 0, NULL); return 0; } @@ -485,7 +486,7 @@ void cfs_mem_fini(void) { raw_page_death_row_clean(); spin_lock_done(&page_death_row_phylax); - cfs_mem_cache_destroy(raw_page_cache); + kmem_cache_destroy(raw_page_cache); #if CFS_INDIVIDUAL_ZONE cfs_zone_nob.z_nob = NULL; diff --git a/libcfs/libcfs/darwin/darwin-tcpip.c b/libcfs/libcfs/darwin/darwin-tcpip.c index b91e9dc..406eb7e 100644 --- a/libcfs/libcfs/darwin/darwin-tcpip.c +++ b/libcfs/libcfs/darwin/darwin-tcpip.c @@ -179,9 +179,9 @@ libcfs_ipif_enumerate (char ***namesp) nalloc = 16; /* first guess at max interfaces */ toobig = 0; for (;;) { - if (nalloc * sizeof(*ifr) > CFS_PAGE_SIZE) { + if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) { toobig = 1; - nalloc = CFS_PAGE_SIZE/sizeof(*ifr); + nalloc = PAGE_CACHE_SIZE/sizeof(*ifr); CWARN("Too many interfaces: only enumerating first %d\n", nalloc); } @@ -821,9 +821,9 @@ libcfs_ipif_enumerate (char ***namesp) nalloc = 16; /* first guess at max interfaces */ toobig = 0; for (;;) { - if (nalloc * sizeof(*ifr) > CFS_PAGE_SIZE) { + if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) { toobig = 1; - nalloc = CFS_PAGE_SIZE/sizeof(*ifr); + nalloc = PAGE_CACHE_SIZE/sizeof(*ifr); CWARN("Too many interfaces: only enumerating first %d\n", nalloc); } diff --git a/libcfs/libcfs/darwin/darwin-tracefile.c b/libcfs/libcfs/darwin/darwin-tracefile.c index 0ecce6d..f8832a0 100644 --- a/libcfs/libcfs/darwin/darwin-tracefile.c +++ 
b/libcfs/libcfs/darwin/darwin-tracefile.c @@ -122,7 +122,7 @@ struct trace_cpu_data *trace_get_tcd(void) tcd = &trace_data[0].tcd; CFS_INIT_LIST_HEAD(&pages); if (get_preemption_level() == 0) - nr_pages = trace_refill_stock(tcd, CFS_ALLOC_STD, &pages); + nr_pages = trace_refill_stock(tcd, GFP_IOFS, &pages); else nr_pages = 0; spin_lock(&trace_cpu_serializer); diff --git a/libcfs/libcfs/debug.c b/libcfs/libcfs/debug.c index c80af9c..cfa027e 100644 --- a/libcfs/libcfs/debug.c +++ b/libcfs/libcfs/debug.c @@ -414,7 +414,7 @@ int libcfs_debug_init(unsigned long bufsize) max = TCD_MAX_PAGES; } else { max = (max / cfs_num_possible_cpus()); - max = (max << (20 - CFS_PAGE_SHIFT)); + max = (max << (20 - PAGE_CACHE_SHIFT)); } rc = cfs_tracefile_init(max); diff --git a/libcfs/libcfs/heap.c b/libcfs/libcfs/heap.c index 3a4f168..f9362ea 100644 --- a/libcfs/libcfs/heap.c +++ b/libcfs/libcfs/heap.c @@ -41,7 +41,7 @@ do { \ if ((h)->cbh_flags & CBH_FLAG_ATOMIC_GROW) \ LIBCFS_CPT_ALLOC_GFP((ptr), h->cbh_cptab, h->cbh_cptid, \ - CBH_NOB, CFS_ALLOC_ATOMIC); \ + CBH_NOB, GFP_ATOMIC); \ else \ LIBCFS_CPT_ALLOC((ptr), h->cbh_cptab, h->cbh_cptid, \ CBH_NOB); \ diff --git a/libcfs/libcfs/kernel_user_comm.c b/libcfs/libcfs/kernel_user_comm.c index 08974d1..4cac2e5 100644 --- a/libcfs/libcfs/kernel_user_comm.c +++ b/libcfs/libcfs/kernel_user_comm.c @@ -215,7 +215,7 @@ int libcfs_kkuc_group_add(struct file *filp, int uid, int group, __u32 data) return -EBADF; /* freed in group_rem */ - reg = cfs_alloc(sizeof(*reg), 0); + reg = kmalloc(sizeof(*reg), 0); if (reg == NULL) return -ENOMEM; @@ -262,7 +262,7 @@ int libcfs_kkuc_group_rem(int uid, int group) reg->kr_uid, reg->kr_fp, group); if (reg->kr_fp != NULL) fput(reg->kr_fp); - cfs_free(reg); + kfree(reg); } } up_write(&kg_sem); diff --git a/libcfs/libcfs/libcfs_string.c b/libcfs/libcfs/libcfs_string.c index db376b5..73276e7 100644 --- a/libcfs/libcfs/libcfs_string.c +++ b/libcfs/libcfs/libcfs_string.c @@ -140,7 +140,7 @@ char *cfs_strdup(const char *str, u_int32_t flags) lenz = strlen(str) + 1; - dup_str = cfs_alloc(lenz, flags); + dup_str = kmalloc(lenz, flags); if (dup_str == NULL) return NULL; diff --git a/libcfs/libcfs/linux/linux-crypto.c b/libcfs/libcfs/linux/linux-crypto.c index 2f14540..7ae502f 100644 --- a/libcfs/libcfs/linux/linux-crypto.c +++ b/libcfs/libcfs/linux/linux-crypto.c @@ -205,14 +205,14 @@ struct cfs_crypto_hash_desc * int err; const struct cfs_crypto_hash_type *type; - hdesc = cfs_alloc(sizeof(*hdesc), 0); + hdesc = kmalloc(sizeof(*hdesc), 0); if (hdesc == NULL) return ERR_PTR(-ENOMEM); err = cfs_crypto_hash_alloc(alg_id, &type, hdesc, key, key_len); if (err) { - cfs_free(hdesc); + kfree(hdesc); return ERR_PTR(err); } return (struct cfs_crypto_hash_desc *)hdesc; @@ -220,7 +220,7 @@ struct cfs_crypto_hash_desc * EXPORT_SYMBOL(cfs_crypto_hash_init); int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc, - cfs_page_t *page, unsigned int offset, + struct page *page, unsigned int offset, unsigned int len) { struct scatterlist sl; @@ -252,7 +252,7 @@ int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc, if (hash_len == NULL) { crypto_free_hash(((struct hash_desc *)hdesc)->tfm); - cfs_free(hdesc); + kfree(hdesc); return 0; } if (hash == NULL || *hash_len < size) { @@ -266,7 +266,7 @@ int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc, return err; } crypto_free_hash(((struct hash_desc *)hdesc)->tfm); - cfs_free(hdesc); + kfree(hdesc); return err; } EXPORT_SYMBOL(cfs_crypto_hash_final); @@ -326,7 +326,7 @@ static int 
cfs_crypto_test_hashes(void) * kmalloc size for 2.6.18 kernel is 128K */ unsigned int data_len = 1 * 128 * 1024; - data = cfs_alloc(data_len, 0); + data = kmalloc(data_len, 0); if (data == NULL) { CERROR("Failed to allocate mem\n"); return -ENOMEM; @@ -338,7 +338,7 @@ static int cfs_crypto_test_hashes(void) for (i = 0; i < CFS_HASH_ALG_MAX; i++) cfs_crypto_performance_test(i, data, data_len); - cfs_free(data); + kfree(data); return 0; } diff --git a/libcfs/libcfs/linux/linux-curproc.c b/libcfs/libcfs/linux/linux-curproc.c index 905befa..dd5d6ce 100644 --- a/libcfs/libcfs/linux/linux-curproc.c +++ b/libcfs/libcfs/linux/linux-curproc.c @@ -275,19 +275,19 @@ int cfs_get_environ(const char *key, char *value, int *val_len) { struct mm_struct *mm; char *buffer, *tmp_buf = NULL; - int buf_len = CFS_PAGE_SIZE; + int buf_len = PAGE_CACHE_SIZE; int key_len = strlen(key); unsigned long addr; int rc; ENTRY; - buffer = cfs_alloc(buf_len, CFS_ALLOC_USER); + buffer = kmalloc(buf_len, GFP_USER); if (!buffer) RETURN(-ENOMEM); mm = get_task_mm(current); if (!mm) { - cfs_free(buffer); + kfree(buffer); RETURN(-EINVAL); } @@ -363,9 +363,9 @@ int cfs_get_environ(const char *key, char *value, int *val_len) out: mmput(mm); - cfs_free((void *)buffer); + kfree((void *)buffer); if (tmp_buf) - cfs_free((void *)tmp_buf); + kfree((void *)tmp_buf); return rc; } EXPORT_SYMBOL(cfs_get_environ); diff --git a/libcfs/libcfs/linux/linux-mem.c b/libcfs/libcfs/linux/linux-mem.c index c174d54..cc18de2 100644 --- a/libcfs/libcfs/linux/linux-mem.c +++ b/libcfs/libcfs/linux/linux-mem.c @@ -41,155 +41,15 @@ #include #include -static unsigned int cfs_alloc_flags_to_gfp(u_int32_t flags) -{ - unsigned int mflags = 0; - - if (flags & CFS_ALLOC_ATOMIC) - mflags |= __GFP_HIGH; - else - mflags |= __GFP_WAIT; - if (flags & CFS_ALLOC_NOWARN) - mflags |= __GFP_NOWARN; - if (flags & CFS_ALLOC_IO) - mflags |= __GFP_IO; - if (flags & CFS_ALLOC_FS) - mflags |= __GFP_FS; - if (flags & CFS_ALLOC_HIGHMEM) - mflags |= __GFP_HIGHMEM; - return mflags; -} - -void * -cfs_alloc(size_t nr_bytes, u_int32_t flags) -{ - void *ptr = NULL; - - ptr = kmalloc(nr_bytes, cfs_alloc_flags_to_gfp(flags)); - if (ptr != NULL && (flags & CFS_ALLOC_ZERO)) - memset(ptr, 0, nr_bytes); - return ptr; -} - -void -cfs_free(void *addr) -{ - kfree(addr); -} - -void * -cfs_alloc_large(size_t nr_bytes) -{ - return vmalloc(nr_bytes); -} - -void -cfs_free_large(void *addr) -{ - vfree(addr); -} - -cfs_page_t *cfs_alloc_page(unsigned int flags) -{ - /* - * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT) - * from here: this will lead to infinite recursion. 
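
With cfs_alloc_flags_to_gfp() gone, callers pass GFP masks directly. The substitutions used throughout this patch correspond to the deleted translation above:

        /*
         * CFS_ALLOC_ATOMIC  -> GFP_ATOMIC   (was __GFP_HIGH, no __GFP_WAIT)
         * CFS_ALLOC_NOWARN  -> __GFP_NOWARN
         * CFS_ALLOC_IO      -> __GFP_IO
         * CFS_ALLOC_FS      -> __GFP_FS
         * CFS_ALLOC_HIGHMEM -> __GFP_HIGHMEM
         * CFS_ALLOC_STD     -> GFP_IOFS
         * CFS_ALLOC_USER    -> GFP_USER
         * CFS_ALLOC_ZERO    -> __GFP_ZERO   (was an explicit memset)
         */
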
- */ - return alloc_page(cfs_alloc_flags_to_gfp(flags)); -} - -void cfs_free_page(cfs_page_t *page) -{ - __free_page(page); -} - -cfs_mem_cache_t * -cfs_mem_cache_create (const char *name, size_t size, size_t offset, - unsigned long flags) -{ -#ifdef HAVE_KMEM_CACHE_CREATE_DTOR - return kmem_cache_create(name, size, offset, flags, NULL, NULL); -#else - return kmem_cache_create(name, size, offset, flags, NULL); -#endif -} - -int -cfs_mem_cache_destroy (cfs_mem_cache_t * cachep) -{ -#ifdef HAVE_KMEM_CACHE_DESTROY_INT - return kmem_cache_destroy(cachep); -#else - kmem_cache_destroy(cachep); - return 0; -#endif -} - -void * -cfs_mem_cache_alloc(cfs_mem_cache_t *cachep, int flags) -{ - return kmem_cache_alloc(cachep, cfs_alloc_flags_to_gfp(flags)); -} - -void -cfs_mem_cache_free(cfs_mem_cache_t *cachep, void *objp) -{ - return kmem_cache_free(cachep, objp); -} - -/** - * Returns true if \a addr is an address of an allocated object in a slab \a - * kmem. Used in assertions. This check is optimistically imprecise, i.e., it - * occasionally returns true for the incorrect addresses, but if it returns - * false, then the addresses is guaranteed to be incorrect. - */ -int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem) -{ -#ifdef CONFIG_SLAB - struct page *page; - - /* - * XXX Copy of mm/slab.c:virt_to_cache(). It won't work with other - * allocators, like slub and slob. - */ - page = virt_to_page(addr); - if (unlikely(PageCompound(page))) - page = (struct page *)page->private; - return PageSlab(page) && ((void *)page->lru.next) == kmem; -#else - return 1; -#endif -} -EXPORT_SYMBOL(cfs_mem_is_in_cache); - - -EXPORT_SYMBOL(cfs_alloc); -EXPORT_SYMBOL(cfs_free); -EXPORT_SYMBOL(cfs_alloc_large); -EXPORT_SYMBOL(cfs_free_large); -EXPORT_SYMBOL(cfs_alloc_page); -EXPORT_SYMBOL(cfs_free_page); -EXPORT_SYMBOL(cfs_mem_cache_create); -EXPORT_SYMBOL(cfs_mem_cache_destroy); -EXPORT_SYMBOL(cfs_mem_cache_alloc); -EXPORT_SYMBOL(cfs_mem_cache_free); - -/* - * NB: we will rename some of above functions in another patch: - * - rename cfs_alloc to cfs_malloc - * - rename cfs_alloc/free_page to cfs_page_alloc/free - * - rename cfs_alloc/free_large to cfs_vmalloc/vfree - */ - void * cfs_cpt_malloc(struct cfs_cpt_table *cptab, int cpt, size_t nr_bytes, unsigned int flags) { void *ptr; - ptr = kmalloc_node(nr_bytes, cfs_alloc_flags_to_gfp(flags), + ptr = kmalloc_node(nr_bytes, flags, cfs_cpt_spread_node(cptab, cpt)); - if (ptr != NULL && (flags & CFS_ALLOC_ZERO) != 0) + if (ptr != NULL && (flags & __GFP_ZERO) != 0) memset(ptr, 0, nr_bytes); return ptr; @@ -203,19 +63,18 @@ cfs_cpt_vmalloc(struct cfs_cpt_table *cptab, int cpt, size_t nr_bytes) } EXPORT_SYMBOL(cfs_cpt_vmalloc); -cfs_page_t * +struct page * cfs_page_cpt_alloc(struct cfs_cpt_table *cptab, int cpt, unsigned int flags) { - return alloc_pages_node(cfs_cpt_spread_node(cptab, cpt), - cfs_alloc_flags_to_gfp(flags), 0); + return alloc_pages_node(cfs_cpt_spread_node(cptab, cpt), flags, 0); } EXPORT_SYMBOL(cfs_page_cpt_alloc); void * -cfs_mem_cache_cpt_alloc(cfs_mem_cache_t *cachep, struct cfs_cpt_table *cptab, +cfs_mem_cache_cpt_alloc(struct kmem_cache *cachep, struct cfs_cpt_table *cptab, int cpt, unsigned int flags) { - return kmem_cache_alloc_node(cachep, cfs_alloc_flags_to_gfp(flags), + return kmem_cache_alloc_node(cachep, flags, cfs_cpt_spread_node(cptab, cpt)); } EXPORT_SYMBOL(cfs_mem_cache_cpt_alloc); diff --git a/libcfs/libcfs/linux/linux-tcpip.c b/libcfs/libcfs/linux/linux-tcpip.c index e7bd69e..d9849b9 100644 --- 
a/libcfs/libcfs/linux/linux-tcpip.c +++ b/libcfs/libcfs/linux/linux-tcpip.c @@ -178,12 +178,12 @@ libcfs_ipif_enumerate (char ***namesp) nalloc = 16; /* first guess at max interfaces */ toobig = 0; for (;;) { - if (nalloc * sizeof(*ifr) > CFS_PAGE_SIZE) { - toobig = 1; - nalloc = CFS_PAGE_SIZE/sizeof(*ifr); - CWARN("Too many interfaces: only enumerating first %d\n", - nalloc); - } + if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) { + toobig = 1; + nalloc = PAGE_CACHE_SIZE/sizeof(*ifr); + CWARN("Too many interfaces: only enumerating first %d\n", + nalloc); + } LIBCFS_ALLOC(ifr, nalloc * sizeof(*ifr)); if (ifr == NULL) { diff --git a/libcfs/libcfs/linux/linux-tracefile.c b/libcfs/libcfs/linux/linux-tracefile.c index da33e01..a0168dc 100644 --- a/libcfs/libcfs/linux/linux-tracefile.c +++ b/libcfs/libcfs/linux/linux-tracefile.c @@ -269,7 +269,7 @@ void cfs_print_to_console(struct ptldebug_header *hdr, int mask, int cfs_trace_max_debug_mb(void) { - int total_mb = (cfs_num_physpages >> (20 - PAGE_SHIFT)); + int total_mb = (num_physpages >> (20 - PAGE_SHIFT)); return MAX(512, (total_mb * 80)/100); } diff --git a/libcfs/libcfs/lwt.c b/libcfs/libcfs/lwt.c index ea5d076..0666b4c 100644 --- a/libcfs/libcfs/lwt.c +++ b/libcfs/libcfs/lwt.c @@ -72,19 +72,19 @@ lwt_lookup_string (int *size, char *knl_ptr, *size = strnlen (knl_ptr, maxsize - 1) + 1; - if (user_ptr != NULL) { - if (user_size < 4) - return (-EINVAL); + if (user_ptr != NULL) { + if (user_size < 4) + return -EINVAL; - if (cfs_copy_to_user (user_ptr, knl_ptr, *size)) - return (-EFAULT); + if (copy_to_user(user_ptr, knl_ptr, *size)) + return -EFAULT; - /* Did I truncate the string? */ - if (knl_ptr[*size - 1] != 0) - cfs_copy_to_user (user_ptr + *size - 4, "...", 4); - } + /* Did I truncate the string? */ + if (knl_ptr[*size - 1] != 0) + copy_to_user(user_ptr + *size - 4, "...", 4); + } - return (0); + return 0; } int @@ -115,7 +115,7 @@ lwt_control (int enable, int clear) continue; for (j = 0; j < lwt_pages_per_cpu; j++) { - memset (p->lwtp_events, 0, CFS_PAGE_SIZE); + memset(p->lwtp_events, 0, PAGE_CACHE_SIZE); p = cfs_list_entry (p->lwtp_list.next, lwt_page_t, lwtp_list); @@ -132,14 +132,14 @@ lwt_control (int enable, int clear) } int -lwt_snapshot (cfs_cycles_t *now, int *ncpu, int *total_size, - void *user_ptr, int user_size) +lwt_snapshot(cfs_cycles_t *now, int *ncpu, int *total_size, + void *user_ptr, int user_size) { - const int events_per_page = CFS_PAGE_SIZE / sizeof(lwt_event_t); - const int bytes_per_page = events_per_page * sizeof(lwt_event_t); - lwt_page_t *p; - int i; - int j; + const int events_per_page = PAGE_CACHE_SIZE / sizeof(lwt_event_t); + const int bytes_per_page = events_per_page * sizeof(lwt_event_t); + lwt_page_t *p; + int i; + int j; if (!cfs_capable(CFS_CAP_SYS_ADMIN)) return (-EPERM); @@ -156,12 +156,12 @@ lwt_snapshot (cfs_cycles_t *now, int *ncpu, int *total_size, p = lwt_cpus[i].lwtc_current_page; if (p == NULL) - return (-ENODATA); + return -ENODATA; - for (j = 0; j < lwt_pages_per_cpu; j++) { - if (cfs_copy_to_user(user_ptr, p->lwtp_events, - bytes_per_page)) - return (-EFAULT); + for (j = 0; j < lwt_pages_per_cpu; j++) { + if (copy_to_user(user_ptr, p->lwtp_events, + bytes_per_page)) + return -EFAULT; user_ptr = ((char *)user_ptr) + bytes_per_page; p = cfs_list_entry(p->lwtp_list.next, @@ -186,12 +186,12 @@ lwt_init () /* NULL pointers, zero scalars */ memset (lwt_cpus, 0, sizeof (lwt_cpus)); - lwt_pages_per_cpu = - LWT_MEMORY / (cfs_num_online_cpus() * CFS_PAGE_SIZE); + lwt_pages_per_cpu = + LWT_MEMORY / 
(cfs_num_online_cpus() * PAGE_CACHE_SIZE); for (i = 0; i < cfs_num_online_cpus(); i++) for (j = 0; j < lwt_pages_per_cpu; j++) { - struct page *page = alloc_page (GFP_KERNEL); + struct page *page = alloc_page(GFP_KERNEL); lwt_page_t *lwtp; if (page == NULL) { @@ -210,7 +210,7 @@ lwt_init () lwtp->lwtp_page = page; lwtp->lwtp_events = page_address(page); - memset (lwtp->lwtp_events, 0, CFS_PAGE_SIZE); + memset(lwtp->lwtp_events, 0, PAGE_CACHE_SIZE); if (j == 0) { CFS_INIT_LIST_HEAD (&lwtp->lwtp_list); diff --git a/libcfs/libcfs/module.c b/libcfs/libcfs/module.c index 878be01..f3f8643 100644 --- a/libcfs/libcfs/module.c +++ b/libcfs/libcfs/module.c @@ -45,110 +45,109 @@ void kportal_memhog_free (struct libcfs_device_userstate *ldu) { - cfs_page_t **level0p = &ldu->ldu_memhog_root_page; - cfs_page_t **level1p; - cfs_page_t **level2p; - int count1; - int count2; + struct page **level0p = &ldu->ldu_memhog_root_page; + struct page **level1p; + struct page **level2p; + int count1; + int count2; - if (*level0p != NULL) { + if (*level0p != NULL) { + level1p = (struct page **)page_address(*level0p); + count1 = 0; - level1p = (cfs_page_t **)cfs_page_address(*level0p); - count1 = 0; + while (count1 < PAGE_CACHE_SIZE/sizeof(struct page *) && + *level1p != NULL) { - while (count1 < CFS_PAGE_SIZE/sizeof(cfs_page_t *) && - *level1p != NULL) { + level2p = (struct page **)page_address(*level1p); + count2 = 0; - level2p = (cfs_page_t **)cfs_page_address(*level1p); - count2 = 0; + while (count2 < PAGE_CACHE_SIZE/sizeof(struct page *) && + *level2p != NULL) { - while (count2 < CFS_PAGE_SIZE/sizeof(cfs_page_t *) && - *level2p != NULL) { + __free_page(*level2p); + ldu->ldu_memhog_pages--; + level2p++; + count2++; + } - cfs_free_page(*level2p); - ldu->ldu_memhog_pages--; - level2p++; - count2++; - } - - cfs_free_page(*level1p); - ldu->ldu_memhog_pages--; - level1p++; - count1++; - } + __free_page(*level1p); + ldu->ldu_memhog_pages--; + level1p++; + count1++; + } - cfs_free_page(*level0p); - ldu->ldu_memhog_pages--; + __free_page(*level0p); + ldu->ldu_memhog_pages--; - *level0p = NULL; - } + *level0p = NULL; + } - LASSERT (ldu->ldu_memhog_pages == 0); + LASSERT(ldu->ldu_memhog_pages == 0); } int kportal_memhog_alloc (struct libcfs_device_userstate *ldu, int npages, int flags) { - cfs_page_t **level0p; - cfs_page_t **level1p; - cfs_page_t **level2p; - int count1; - int count2; + struct page **level0p; + struct page **level1p; + struct page **level2p; + int count1; + int count2; - LASSERT (ldu->ldu_memhog_pages == 0); - LASSERT (ldu->ldu_memhog_root_page == NULL); + LASSERT(ldu->ldu_memhog_pages == 0); + LASSERT(ldu->ldu_memhog_root_page == NULL); - if (npages < 0) - return -EINVAL; + if (npages < 0) + return -EINVAL; - if (npages == 0) - return 0; + if (npages == 0) + return 0; - level0p = &ldu->ldu_memhog_root_page; - *level0p = cfs_alloc_page(flags); - if (*level0p == NULL) - return -ENOMEM; - ldu->ldu_memhog_pages++; + level0p = &ldu->ldu_memhog_root_page; + *level0p = alloc_page(flags); + if (*level0p == NULL) + return -ENOMEM; + ldu->ldu_memhog_pages++; - level1p = (cfs_page_t **)cfs_page_address(*level0p); - count1 = 0; - memset(level1p, 0, CFS_PAGE_SIZE); + level1p = (struct page **)page_address(*level0p); + count1 = 0; + memset(level1p, 0, PAGE_CACHE_SIZE); - while (ldu->ldu_memhog_pages < npages && - count1 < CFS_PAGE_SIZE/sizeof(cfs_page_t *)) { + while (ldu->ldu_memhog_pages < npages && + count1 < PAGE_CACHE_SIZE/sizeof(struct page *)) { - if (cfs_signal_pending()) - return (-EINTR); + if 
(cfs_signal_pending()) + return -EINTR; - *level1p = cfs_alloc_page(flags); - if (*level1p == NULL) - return -ENOMEM; - ldu->ldu_memhog_pages++; + *level1p = alloc_page(flags); + if (*level1p == NULL) + return -ENOMEM; + ldu->ldu_memhog_pages++; - level2p = (cfs_page_t **)cfs_page_address(*level1p); - count2 = 0; - memset(level2p, 0, CFS_PAGE_SIZE); + level2p = (struct page **)page_address(*level1p); + count2 = 0; + memset(level2p, 0, PAGE_CACHE_SIZE); - while (ldu->ldu_memhog_pages < npages && - count2 < CFS_PAGE_SIZE/sizeof(cfs_page_t *)) { + while (ldu->ldu_memhog_pages < npages && + count2 < PAGE_CACHE_SIZE/sizeof(struct page *)) { - if (cfs_signal_pending()) - return (-EINTR); + if (cfs_signal_pending()) + return -EINTR; - *level2p = cfs_alloc_page(flags); - if (*level2p == NULL) - return (-ENOMEM); - ldu->ldu_memhog_pages++; + *level2p = alloc_page(flags); + if (*level2p == NULL) + return -ENOMEM; + ldu->ldu_memhog_pages++; - level2p++; - count2++; - } + level2p++; + count2++; + } - level1p++; - count1++; - } + level1p++; + count1++; + } - return 0; + return 0; } /* called when opening /dev/device */ @@ -326,16 +325,17 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile,unsigned long cmd, RETURN(err); } -static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd, void *arg) +static int libcfs_ioctl(struct cfs_psdev_file *pfile, + unsigned long cmd, void *arg) { - char *buf; - struct libcfs_ioctl_data *data; - int err = 0; - ENTRY; - - LIBCFS_ALLOC_GFP(buf, 1024, CFS_ALLOC_STD); - if (buf == NULL) - RETURN(-ENOMEM); + char *buf; + struct libcfs_ioctl_data *data; + int err = 0; + ENTRY; + + LIBCFS_ALLOC_GFP(buf, 1024, GFP_IOFS); + if (buf == NULL) + RETURN(-ENOMEM); /* 'cmd' and permissions get checked in our arch-specific caller */ if (libcfs_ioctl_getdata(buf, buf + 800, (void *)arg)) { diff --git a/libcfs/libcfs/posix/posix-debug.c b/libcfs/libcfs/posix/posix-debug.c index d4de5b9..a24e3e8 100644 --- a/libcfs/libcfs/posix/posix-debug.c +++ b/libcfs/libcfs/posix/posix-debug.c @@ -171,13 +171,13 @@ libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, const char *format1, va_list args, const char *format2, ...) { - struct timeval tv; - int nob; - int remain; - va_list ap; - char buf[CFS_PAGE_SIZE]; /* size 4096 used for compatimble - * with linux, where message can`t - * be exceed PAGE_SIZE */ + struct timeval tv; + int nob; + int remain; + va_list ap; + char buf[PAGE_CACHE_SIZE]; /* size 4096 used for compatibility + * with linux, where a message + * can't exceed PAGE_SIZE */ int console = 0; char *prefix = "Lustre"; diff --git a/libcfs/libcfs/tracefile.c b/libcfs/libcfs/tracefile.c index 2049908..3a96dae 100644 --- a/libcfs/libcfs/tracefile.c +++ b/libcfs/libcfs/tracefile.c @@ -68,41 +68,41 @@ cfs_tage_from_list(cfs_list_t *list) static struct cfs_trace_page *cfs_tage_alloc(int gfp) { - cfs_page_t *page; - struct cfs_trace_page *tage; - - /* My caller is trying to free memory */ - if (!cfs_in_interrupt() && cfs_memory_pressure_get()) - return NULL; - - /* - * Don't spam console with allocation failures: they will be reported - * by upper layer anyway.
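
cfs_tage_alloc() is the converted form of a common idiom: fail fast when the caller itself is reclaiming memory, and suppress the allocator's failure warning because the upper layer reports it anyway. Sketched as a hypothetical helper:

        static void *opportunistic_alloc(size_t size, int gfp)
        {
                /* give up at once if the caller is trying to free memory */
                if (!cfs_in_interrupt() && memory_pressure_get())
                        return NULL;
                return kmalloc(size, gfp | __GFP_NOWARN);
        }
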
- */ - gfp |= CFS_ALLOC_NOWARN; - page = cfs_alloc_page(gfp); - if (page == NULL) - return NULL; + struct page *page; + struct cfs_trace_page *tage; - tage = cfs_alloc(sizeof(*tage), gfp); - if (tage == NULL) { - cfs_free_page(page); - return NULL; - } + /* My caller is trying to free memory */ + if (!cfs_in_interrupt() && memory_pressure_get()) + return NULL; + + /* + * Don't spam console with allocation failures: they will be reported + * by upper layer anyway. + */ + gfp |= __GFP_NOWARN; + page = alloc_page(gfp); + if (page == NULL) + return NULL; + + tage = kmalloc(sizeof(*tage), gfp); + if (tage == NULL) { + __free_page(page); + return NULL; + } - tage->page = page; - cfs_atomic_inc(&cfs_tage_allocated); - return tage; + tage->page = page; + cfs_atomic_inc(&cfs_tage_allocated); + return tage; } static void cfs_tage_free(struct cfs_trace_page *tage) { - __LASSERT(tage != NULL); - __LASSERT(tage->page != NULL); + __LASSERT(tage != NULL); + __LASSERT(tage->page != NULL); - cfs_free_page(tage->page); - cfs_free(tage); - cfs_atomic_dec(&cfs_tage_allocated); + __free_page(tage->page); + kfree(tage); + cfs_atomic_dec(&cfs_tage_allocated); } static void cfs_tage_to_tail(struct cfs_trace_page *tage, @@ -144,7 +144,7 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len) if (tcd->tcd_cur_pages > 0) { __LASSERT(!cfs_list_empty(&tcd->tcd_pages)); tage = cfs_tage_from_list(tcd->tcd_pages.prev); - if (tage->used + len <= CFS_PAGE_SIZE) + if (tage->used + len <= PAGE_CACHE_SIZE) return tage; } @@ -154,9 +154,9 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len) --tcd->tcd_cur_stock_pages; cfs_list_del_init(&tage->linkage); } else { - tage = cfs_tage_alloc(CFS_ALLOC_ATOMIC); + tage = cfs_tage_alloc(GFP_ATOMIC); if (unlikely(tage == NULL)) { - if ((!cfs_memory_pressure_get() || + if ((!memory_pressure_get() || cfs_in_interrupt()) && printk_ratelimit()) printk(CFS_KERN_WARNING "cannot allocate a tage (%ld)\n", @@ -225,7 +225,7 @@ static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd, * from here: this will lead to infinite recursion. 
*/ - if (len > CFS_PAGE_SIZE) { + if (len > PAGE_CACHE_SIZE) { printk(CFS_KERN_ERR "cowardly refusing to write %lu bytes in a page\n", len); return NULL; @@ -317,7 +317,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, for (i = 0; i < 2; i++) { tage = cfs_trace_get_tage(tcd, needed + known_size + 1); if (tage == NULL) { - if (needed + known_size > CFS_PAGE_SIZE) + if (needed + known_size > PAGE_CACHE_SIZE) mask |= D_ERROR; cfs_trace_put_tcd(tcd); @@ -325,10 +325,10 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, goto console; } - string_buf = (char *)cfs_page_address(tage->page) + + string_buf = (char *)page_address(tage->page) + tage->used + known_size; - max_nob = CFS_PAGE_SIZE - tage->used - known_size; + max_nob = PAGE_CACHE_SIZE - tage->used - known_size; if (max_nob <= 0) { printk(CFS_KERN_EMERG "negative max_nob: %d\n", max_nob); @@ -365,7 +365,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, "newline\n", file, msgdata->msg_line, msgdata->msg_fn); header.ph_len = known_size + needed; - debug_buf = (char *)cfs_page_address(tage->page) + tage->used; + debug_buf = (char *)page_address(tage->page) + tage->used; if (libcfs_debug_binary) { memcpy(debug_buf, &header, sizeof(header)); @@ -392,7 +392,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, __LASSERT(debug_buf == string_buf); tage->used += needed; - __LASSERT (tage->used <= CFS_PAGE_SIZE); + __LASSERT(tage->used <= PAGE_CACHE_SIZE); console: if ((mask & libcfs_printk) == 0) { @@ -652,14 +652,14 @@ void cfs_trace_debug_print(void) collect_pages(&pc); cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages, struct cfs_trace_page, linkage) { - char *p, *file, *fn; - cfs_page_t *page; + char *p, *file, *fn; + struct page *page; - __LASSERT_TAGE_INVARIANT(tage); + __LASSERT_TAGE_INVARIANT(tage); - page = tage->page; - p = cfs_page_address(page); - while (p < ((char *)cfs_page_address(page) + tage->used)) { + page = tage->page; + p = page_address(page); + while (p < ((char *)page_address(page) + tage->used)) { struct ptldebug_header *hdr; int len; hdr = (void *)p; @@ -688,7 +688,7 @@ int cfs_tracefile_dump_all_pages(char *filename) struct cfs_trace_page *tmp; int rc; - CFS_DECL_MMSPACE; + DECL_MMSPACE; cfs_tracefile_write_lock(); @@ -711,13 +711,13 @@ int cfs_tracefile_dump_all_pages(char *filename) /* ok, for now, just write the pages. 
in the future we'll be building * iobufs with the pages and calling generic_direct_IO */ - CFS_MMSPACE_OPEN; + MMSPACE_OPEN; cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages, struct cfs_trace_page, linkage) { __LASSERT_TAGE_INVARIANT(tage); - rc = filp_write(filp, cfs_page_address(tage->page), + rc = filp_write(filp, page_address(tage->page), tage->used, filp_poff(filp)); if (rc != (int)tage->used) { printk(CFS_KERN_WARNING "wanted to write %u but wrote " @@ -729,7 +729,7 @@ int cfs_tracefile_dump_all_pages(char *filename) cfs_list_del(&tage->linkage); cfs_tage_free(tage); } - CFS_MMSPACE_CLOSE; + MMSPACE_CLOSE; rc = filp_fsync(filp); if (rc) printk(CFS_KERN_ERR "sync returns %d\n", rc); @@ -768,7 +768,7 @@ int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob, if (usr_buffer_nob > knl_buffer_nob) return -EOVERFLOW; - if (cfs_copy_from_user((void *)knl_buffer, + if (copy_from_user((void *)knl_buffer, (void *)usr_buffer, usr_buffer_nob)) return -EFAULT; @@ -799,11 +799,11 @@ int cfs_trace_copyout_string(char *usr_buffer, int usr_buffer_nob, if (nob > usr_buffer_nob) nob = usr_buffer_nob; - if (cfs_copy_to_user(usr_buffer, knl_buffer, nob)) + if (copy_to_user(usr_buffer, knl_buffer, nob)) return -EFAULT; if (append != NULL && nob < usr_buffer_nob) { - if (cfs_copy_to_user(usr_buffer + nob, append, 1)) + if (copy_to_user(usr_buffer + nob, append, 1)) return -EFAULT; nob++; @@ -815,10 +815,10 @@ EXPORT_SYMBOL(cfs_trace_copyout_string); int cfs_trace_allocate_string_buffer(char **str, int nob) { - if (nob > 2 * CFS_PAGE_SIZE) /* string must be "sensible" */ + if (nob > 2 * PAGE_CACHE_SIZE) /* string must be "sensible" */ return -EINVAL; - *str = cfs_alloc(nob, CFS_ALLOC_STD | CFS_ALLOC_ZERO); + *str = kmalloc(nob, GFP_IOFS | __GFP_ZERO); if (*str == NULL) return -ENOMEM; @@ -827,7 +827,7 @@ int cfs_trace_allocate_string_buffer(char **str, int nob) void cfs_trace_free_string_buffer(char *str, int nob) { - cfs_free(str); + kfree(str); } int cfs_trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob) @@ -937,7 +937,7 @@ int cfs_trace_set_debug_mb(int mb) } mb /= cfs_num_possible_cpus(); - pages = mb << (20 - CFS_PAGE_SHIFT); + pages = mb << (20 - PAGE_CACHE_SHIFT); cfs_tracefile_write_lock(); @@ -975,7 +975,7 @@ int cfs_trace_get_debug_mb(void) cfs_tracefile_read_unlock(); - return (total_pages >> (20 - CFS_PAGE_SHIFT)) + 1; + return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1; } static int tracefiled(void *arg) @@ -988,7 +988,7 @@ static int tracefiled(void *arg) int last_loop = 0; int rc; - CFS_DECL_MMSPACE; + DECL_MMSPACE; /* we're started late enough that we pick up init's fs context */ /* this is so broken in uml? what on earth is going on? 
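
The megabyte/page conversions above depend only on PAGE_CACHE_SHIFT; with the usual 4096-byte pages (PAGE_CACHE_SHIFT == 12) the shift count is 8:

        pages = mb << (20 - PAGE_CACHE_SHIFT);  /* 256 MB -> 256 << 8 = 65536 pages */
        mb = pages >> (20 - PAGE_CACHE_SHIFT);  /* 65536 pages -> 256 MB */

cfs_trace_get_debug_mb() adds one to the result to round up.
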
*/ @@ -1024,7 +1024,7 @@ static int tracefiled(void *arg) goto end_loop; } - CFS_MMSPACE_OPEN; + MMSPACE_OPEN; cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages, struct cfs_trace_page, @@ -1038,7 +1038,7 @@ static int tracefiled(void *arg) else if (f_pos > (off_t)filp_size(filp)) f_pos = filp_size(filp); - rc = filp_write(filp, cfs_page_address(tage->page), + rc = filp_write(filp, page_address(tage->page), tage->used, &f_pos); if (rc != (int)tage->used) { printk(CFS_KERN_WARNING "wanted to write %u " @@ -1047,7 +1047,7 @@ static int tracefiled(void *arg) __LASSERT(cfs_list_empty(&pc.pc_pages)); } } - CFS_MMSPACE_CLOSE; + MMSPACE_CLOSE; filp_close(filp, NULL); put_pages_on_daemon_list(&pc); diff --git a/libcfs/libcfs/tracefile.h b/libcfs/libcfs/tracefile.h index 968c128..196ab96 100644 --- a/libcfs/libcfs/tracefile.h +++ b/libcfs/libcfs/tracefile.h @@ -92,7 +92,7 @@ extern void libcfs_unregister_panic_notifier(void); extern int libcfs_panic_in_progress; extern int cfs_trace_max_debug_mb(void); -#define TCD_MAX_PAGES (5 << (20 - CFS_PAGE_SHIFT)) +#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT)) #define TCD_STOCK_PAGES (TCD_MAX_PAGES) #define CFS_TRACEFILE_SIZE (500 << 20) @@ -101,7 +101,7 @@ extern int cfs_trace_max_debug_mb(void); /* * Private declare for tracefile */ -#define TCD_MAX_PAGES (5 << (20 - CFS_PAGE_SHIFT)) +#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT)) #define TCD_STOCK_PAGES (TCD_MAX_PAGES) #define CFS_TRACEFILE_SIZE (500 << 20) @@ -239,7 +239,7 @@ struct cfs_trace_page { /* * page itself */ - cfs_page_t *page; + struct page *page; /* * linkage into one of the lists in trace_data_union or * page_collection @@ -337,8 +337,8 @@ do { \ do { \ __LASSERT(tage != NULL); \ __LASSERT(tage->page != NULL); \ - __LASSERT(tage->used <= CFS_PAGE_SIZE); \ - __LASSERT(cfs_page_count(tage->page) > 0); \ + __LASSERT(tage->used <= PAGE_CACHE_SIZE); \ + __LASSERT(page_count(tage->page) > 0); \ } while (0) #endif /* LUSTRE_TRACEFILE_PRIVATE */ diff --git a/libcfs/libcfs/user-crypto.c b/libcfs/libcfs/user-crypto.c index 224ca77..5ad136a 100644 --- a/libcfs/libcfs/user-crypto.c +++ b/libcfs/libcfs/user-crypto.c @@ -214,7 +214,7 @@ struct cfs_crypto_hash_desc return ERR_PTR(-ENODEV); } - hdesc = cfs_alloc(sizeof(*hdesc) + ha->ha_ctx_size, 0); + hdesc = kmalloc(sizeof(*hdesc) + ha->ha_ctx_size, 0); if (hdesc == NULL) return ERR_PTR(-ENOMEM); @@ -225,7 +225,7 @@ struct cfs_crypto_hash_desc if (err == 0) { return (struct cfs_crypto_hash_desc *) hdesc; } else { - cfs_free(hdesc); + kfree(hdesc); return ERR_PTR(err); } } @@ -241,7 +241,7 @@ int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *desc, const void *buf, } int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *desc, - cfs_page_t *page, unsigned int offset, + struct page *page, unsigned int offset, unsigned int len) { const void *p = page->addr + offset; @@ -262,7 +262,7 @@ int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *desc, int err; if (hash_len == NULL) { - cfs_free(d); + kfree(d); return 0; } if (hash == NULL || *hash_len < size) { @@ -274,7 +274,7 @@ int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *desc, err = d->hd_hash->final(d->hd_ctx, hash, *hash_len); if (err == 0) { /* If get final digest success free hash descriptor */ - cfs_free(d); + kfree(d); } return err; @@ -370,7 +370,7 @@ static int cfs_crypto_test_hashes(void) unsigned char *data; unsigned int j, data_len = 1024 * 1024; - data = cfs_alloc(data_len, 0); + data = kmalloc(data_len, 0); if (data == NULL) { CERROR("Failed to allocate 
mem\n"); return -ENOMEM; @@ -381,7 +381,7 @@ static int cfs_crypto_test_hashes(void) for (i = 0; i < CFS_HASH_ALG_MAX; i++) cfs_crypto_performance_test(i, data, data_len); - cfs_free(data); + kfree(data); return 0; } diff --git a/libcfs/libcfs/user-mem.c b/libcfs/libcfs/user-mem.c index 615fa00..af1d09c 100644 --- a/libcfs/libcfs/user-mem.c +++ b/libcfs/libcfs/user-mem.c @@ -47,9 +47,9 @@ * Allocator */ -cfs_page_t *cfs_alloc_page(unsigned int flags) +struct page *alloc_page(unsigned int flags) { - cfs_page_t *pg = malloc(sizeof(*pg)); + struct page *pg = malloc(sizeof(*pg)); int rc = 0; if (!pg) @@ -57,11 +57,11 @@ cfs_page_t *cfs_alloc_page(unsigned int flags) pg->addr = NULL; #if defined (__DARWIN__) - pg->addr = valloc(CFS_PAGE_SIZE); + pg->addr = valloc(PAGE_CACHE_SIZE); #elif defined (__WINNT__) pg->addr = pgalloc(0); #else - rc = posix_memalign(&pg->addr, CFS_PAGE_SIZE, CFS_PAGE_SIZE); + rc = posix_memalign(&pg->addr, PAGE_CACHE_SIZE, PAGE_CACHE_SIZE); #endif if (rc != 0 || pg->addr == NULL) { free(pg); @@ -70,7 +70,7 @@ cfs_page_t *cfs_alloc_page(unsigned int flags) return pg; } -void cfs_free_page(cfs_page_t *pg) +void __free_page(struct page *pg) { #if defined (__WINNT__) pgfree(pg->addr); @@ -81,17 +81,17 @@ void cfs_free_page(cfs_page_t *pg) free(pg); } -void *cfs_page_address(cfs_page_t *pg) +void *page_address(struct page *pg) { return pg->addr; } -void *cfs_kmap(cfs_page_t *pg) +void *kmap(struct page *pg) { return pg->addr; } -void cfs_kunmap(cfs_page_t *pg) +void kunmap(struct page *pg) { } @@ -99,10 +99,11 @@ void cfs_kunmap(cfs_page_t *pg) * SLAB allocator */ -cfs_mem_cache_t * -cfs_mem_cache_create(const char *name, size_t objsize, size_t off, unsigned long flags) +struct kmem_cache * +kmem_cache_create(const char *name, size_t objsize, size_t off, + unsigned long flags, void *ctor) { - cfs_mem_cache_t *c; + struct kmem_cache *c; c = malloc(sizeof(*c)); if (!c) @@ -113,21 +114,20 @@ cfs_mem_cache_create(const char *name, size_t objsize, size_t off, unsigned long return c; } -int cfs_mem_cache_destroy(cfs_mem_cache_t *c) +void kmem_cache_destroy(struct kmem_cache *c) { CDEBUG(D_MALLOC, "destroy slab cache %p, objsize %u\n", c, c->size); free(c); - return 0; } -void *cfs_mem_cache_alloc(cfs_mem_cache_t *c, int gfp) +void *kmem_cache_alloc(struct kmem_cache *c, int gfp) { - return cfs_alloc(c->size, gfp); + return kmalloc(c->size, gfp); } -void cfs_mem_cache_free(cfs_mem_cache_t *c, void *addr) +void kmem_cache_free(struct kmem_cache *c, void *addr) { - cfs_free(addr); + kfree(addr); } /** @@ -136,7 +136,7 @@ void cfs_mem_cache_free(cfs_mem_cache_t *c, void *addr) * occasionally returns true for the incorrect addresses, but if it returns * false, then the addresses is guaranteed to be incorrect. 
*/ -int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem) +int kmem_is_in_cache(const void *addr, const struct kmem_cache *kmem) { return 1; } diff --git a/libcfs/libcfs/winnt/winnt-curproc.c b/libcfs/libcfs/winnt/winnt-curproc.c index ba5328f..e59719e 100644 --- a/libcfs/libcfs/winnt/winnt-curproc.c +++ b/libcfs/libcfs/winnt/winnt-curproc.c @@ -155,18 +155,12 @@ struct idr_context * cfs_win_task_slot_idp = NULL; * task slot routiens */ -PTASK_SLOT -alloc_task_slot() +PTASK_SLOT alloc_task_slot() { - PTASK_SLOT task = NULL; - - if (cfs_win_task_manger.slab) { - task = cfs_mem_cache_alloc(cfs_win_task_manger.slab, 0); - } else { - task = cfs_alloc(sizeof(TASK_SLOT), 0); - } - - return task; + if (cfs_win_task_manger.slab) + return kmem_cache_alloc(cfs_win_task_manger.slab, 0); + else + return kmalloc(sizeof(TASK_SLOT), 0); } void @@ -178,18 +172,15 @@ init_task_slot(PTASK_SLOT task) cfs_init_event(&task->Event, TRUE, FALSE); } -void -cleanup_task_slot(PTASK_SLOT task) +void cleanup_task_slot(PTASK_SLOT task) { - if (task->task.pid) { - cfs_idr_remove(cfs_win_task_slot_idp, task->task.pid); - } + if (task->task.pid) + cfs_idr_remove(cfs_win_task_slot_idp, task->task.pid); - if (cfs_win_task_manger.slab) { - cfs_mem_cache_free(cfs_win_task_manger.slab, task); - } else { - cfs_free(task); - } + if (cfs_win_task_manger.slab) + kmem_cache_free(cfs_win_task_manger.slab, task); + else + kfree(task); } /* @@ -243,9 +234,9 @@ init_task_manager() /* initialize the spinlock protection */ spin_lock_init(&cfs_win_task_manger.Lock); - /* create slab memory cache */ - cfs_win_task_manger.slab = cfs_mem_cache_create( - "TSLT", sizeof(TASK_SLOT), 0, 0); + /* create slab memory cache */ + cfs_win_task_manger.slab = kmem_cache_create("TSLT", sizeof(TASK_SLOT), + 0, 0, NULL); /* intialize the list header */ InitializeListHead(&(cfs_win_task_manger.TaskList)); @@ -300,9 +291,9 @@ cleanup_task_manager() spin_unlock(&cfs_win_task_manger.Lock); - /* destroy the taskslot cache slab */ - cfs_mem_cache_destroy(cfs_win_task_manger.slab); - memset(&cfs_win_task_manger, 0, sizeof(TASK_MAN)); + /* destroy the taskslot cache slab */ + kmem_cache_destroy(cfs_win_task_manger.slab); + memset(&cfs_win_task_manger, 0, sizeof(TASK_MAN)); } diff --git a/libcfs/libcfs/winnt/winnt-fs.c b/libcfs/libcfs/winnt/winnt-fs.c index 9a5a2f6..b9b1027 100644 --- a/libcfs/libcfs/winnt/winnt-fs.c +++ b/libcfs/libcfs/winnt/winnt-fs.c @@ -150,16 +150,16 @@ struct file *filp_open(const char *name, int flags, int mode, int *err) return ERR_PTR(-EINVAL); } - AnsiString = cfs_alloc(sizeof(CHAR) * (NameLength + PrefixLength + 1), - CFS_ALLOC_ZERO); + AnsiString = kmalloc(sizeof(CHAR) * (NameLength + PrefixLength + 1), + __GFP_ZERO); if (NULL == AnsiString) return ERR_PTR(-ENOMEM); UnicodeString = - cfs_alloc(sizeof(WCHAR) * (NameLength + PrefixLength + 1), - CFS_ALLOC_ZERO); + kmalloc(sizeof(WCHAR) * (NameLength + PrefixLength + 1), + __GFP_ZERO); if (NULL == UnicodeString) { - cfs_free(AnsiString); + kfree(AnsiString); return ERR_PTR(-ENOMEM); } @@ -205,19 +205,19 @@ struct file *filp_open(const char *name, int flags, int mode, int *err) /* Check the returned status of IoStatus... 
*/ if (!NT_SUCCESS(IoStatus.Status)) { - cfs_free(UnicodeString); - cfs_free(AnsiString); + kfree(UnicodeString); + kfree(AnsiString); return ERR_PTR(cfs_error_code(IoStatus.Status)); } /* Allocate the file_t: libcfs file object */ - fp = cfs_alloc(sizeof(*fp) + NameLength, CFS_ALLOC_ZERO); + fp = kmalloc(sizeof(*fp) + NameLength, __GFP_ZERO); if (NULL == fp) { Status = ZwClose(FileHandle); ASSERT(NT_SUCCESS(Status)); - cfs_free(UnicodeString); - cfs_free(AnsiString); + kfree(UnicodeString); + kfree(AnsiString); return ERR_PTR(-ENOMEM); } @@ -227,11 +227,11 @@ struct file *filp_open(const char *name, int flags, int mode, int *err) fp->f_mode = (mode_t)mode; fp->f_count = 1; - /* free the memory of temporary name strings */ - cfs_free(UnicodeString); - cfs_free(AnsiString); + /* free the memory of temporary name strings */ + kfree(UnicodeString); + kfree(AnsiString); - return fp; + return fp; } @@ -261,9 +261,9 @@ int filp_close(file_t *fp, void *id) Status = ZwClose(fp->f_handle); ASSERT(NT_SUCCESS(Status)); - /* free the file flip structure */ - cfs_free(fp); - return 0; + /* free the file filp structure */ + kfree(fp); + return 0; } @@ -683,6 +683,6 @@ void dput(struct dentry *de) return; } if (cfs_atomic_dec_and_test(&de->d_count)) { - cfs_free(de); + kfree(de); } } diff --git a/libcfs/libcfs/winnt/winnt-mem.c b/libcfs/libcfs/winnt/winnt-mem.c index f6cfb1d..5dc7958 100644 --- a/libcfs/libcfs/winnt/winnt-mem.c +++ b/libcfs/libcfs/winnt/winnt-mem.c @@ -39,37 +39,37 @@ #include -cfs_mem_cache_t *cfs_page_t_slab = NULL; -cfs_mem_cache_t *cfs_page_p_slab = NULL; +struct kmem_cache *cfs_page_t_slab; +struct kmem_cache *cfs_page_p_slab; -cfs_page_t * virt_to_page(void * addr) +struct page *virt_to_page(void *addr) { - cfs_page_t *pg; - pg = cfs_mem_cache_alloc(cfs_page_t_slab, 0); - - if (NULL == pg) { - cfs_enter_debugger(); - return NULL; - } + struct page *pg; + pg = kmem_cache_alloc(cfs_page_t_slab, 0); + + if (NULL == pg) { + cfs_enter_debugger(); + return NULL; + } - memset(pg, 0, sizeof(cfs_page_t)); - pg->addr = (void *)((__u64)addr & (~((__u64)PAGE_SIZE-1))); - pg->mapping = addr; - cfs_atomic_set(&pg->count, 1); + memset(pg, 0, sizeof(struct page)); + pg->addr = (void *)((__u64)addr & (~((__u64)PAGE_SIZE-1))); + pg->mapping = addr; + cfs_atomic_set(&pg->count, 1); set_bit(PG_virt, &(pg->flags)); - cfs_enter_debugger(); - return pg; + cfs_enter_debugger(); + return pg; } /* - * cfs_alloc_page - * To allocate the cfs_page_t and also 1 page of memory + * alloc_page + * To allocate the struct page and also 1 page of memory * * Arguments: * flags: the allocation options * * Return Value: - * pointer to the cfs_page_t strcture in success or + * pointer to the struct page structure in success or * NULL in failure case * * Notes: @@ -78,40 +78,39 @@ cfs_page_t * virt_to_page(void * addr) cfs_atomic_t libcfs_total_pages; -cfs_page_t * cfs_alloc_page(int flags) +struct page *alloc_page(int flags) { - cfs_page_t *pg; - pg = cfs_mem_cache_alloc(cfs_page_t_slab, 0); - - if (NULL == pg) { - cfs_enter_debugger(); - return NULL; - } + struct page *pg; + pg = kmem_cache_alloc(cfs_page_t_slab, 0); - memset(pg, 0, sizeof(cfs_page_t)); - pg->addr = cfs_mem_cache_alloc(cfs_page_p_slab, 0); - cfs_atomic_set(&pg->count, 1); - - if (pg->addr) { - if (cfs_is_flag_set(flags, CFS_ALLOC_ZERO)) { - memset(pg->addr, 0, CFS_PAGE_SIZE); - } - cfs_atomic_inc(&libcfs_total_pages); - } else { - cfs_enter_debugger(); - cfs_mem_cache_free(cfs_page_t_slab, pg); - pg = NULL; - } + if (NULL == pg) { + cfs_enter_debugger();
+ return NULL; + } + + memset(pg, 0, sizeof(struct page)); + pg->addr = kmem_cache_alloc(cfs_page_p_slab, 0); + cfs_atomic_set(&pg->count, 1); + + if (pg->addr) { + if (cfs_is_flag_set(flags, __GFP_ZERO)) + memset(pg->addr, 0, PAGE_CACHE_SIZE); + cfs_atomic_inc(&libcfs_total_pages); + } else { + cfs_enter_debugger(); + kmem_cache_free(cfs_page_t_slab, pg); + pg = NULL; + } - return pg; + return pg; } /* - * cfs_free_page - * To free the cfs_page_t including the page + * __free_page + * To free the struct page including the page * * Arguments: - * pg: pointer to the cfs_page_t strcture + * pg: pointer to the struct page structure * * Return Value: * N/A @@ -119,30 +118,30 @@ cfs_page_t * cfs_alloc_page(int flags) * Notes: * N/A */ -void cfs_free_page(cfs_page_t *pg) +void __free_page(struct page *pg) { - ASSERT(pg != NULL); - ASSERT(pg->addr != NULL); - ASSERT(cfs_atomic_read(&pg->count) <= 1); + ASSERT(pg != NULL); + ASSERT(pg->addr != NULL); + ASSERT(cfs_atomic_read(&pg->count) <= 1); if (!test_bit(PG_virt, &pg->flags)) { - cfs_mem_cache_free(cfs_page_p_slab, pg->addr); - cfs_atomic_dec(&libcfs_total_pages); - } else { - cfs_enter_debugger(); - } - cfs_mem_cache_free(cfs_page_t_slab, pg); + kmem_cache_free(cfs_page_p_slab, pg->addr); + cfs_atomic_dec(&libcfs_total_pages); + } else { + cfs_enter_debugger(); + } + kmem_cache_free(cfs_page_t_slab, pg); } -int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem) +int kmem_is_in_cache(const void *addr, const struct kmem_cache *kmem) { - KdPrint(("cfs_mem_is_in_cache: not implemented. (should maintain a" - "chain to keep all allocations traced.)\n")); - return 1; + KdPrint(("kmem_is_in_cache: not implemented. (should maintain a " + "chain to keep all allocations traced.)\n")); + return 1; } /* - * cfs_alloc + * kmalloc * To allocate memory from system pool * * Arguments: @@ -158,25 +157,23 @@ int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem) */ void * -cfs_alloc(size_t nr_bytes, u_int32_t flags) +kmalloc(size_t nr_bytes, u_int32_t flags) { - void *ptr; + void *ptr; - /* Ignore the flags: always allcoate from NonPagedPool */ - ptr = ExAllocatePoolWithTag(NonPagedPool, nr_bytes, 'Lufs'); - if (ptr != NULL && (flags & CFS_ALLOC_ZERO)) { - memset(ptr, 0, nr_bytes); - } + /* Ignore the flags: always allocate from NonPagedPool */ + ptr = ExAllocatePoolWithTag(NonPagedPool, nr_bytes, 'Lufs'); + if (ptr != NULL && (flags & __GFP_ZERO)) + memset(ptr, 0, nr_bytes); - if (!ptr) { - cfs_enter_debugger(); - } + if (!ptr) + cfs_enter_debugger(); - return ptr; + return ptr; } /* - * cfs_free + * kfree * To free the sepcified memory to system pool * * Arguments: @@ -190,13 +187,13 @@ cfs_alloc(size_t nr_bytes, u_int32_t flags) */ void -cfs_free(void *addr) +kfree(void *addr) { - ExFreePool(addr); + ExFreePool(addr); } /* - * cfs_alloc_large + * vmalloc * To allocate large block of memory from system pool * * Arguments: @@ -211,13 +208,13 @@ cfs_free(void *addr) */ void * -cfs_alloc_large(size_t nr_bytes) +vmalloc(size_t nr_bytes) { - return cfs_alloc(nr_bytes, 0); + return kmalloc(nr_bytes, 0); } /* - * cfs_free_large + * vfree * To free the sepcified memory to system pool * * Arguments: @@ -230,15 +227,14 @@ cfs_alloc_large(size_t nr_bytes) * N/A */ -void -cfs_free_large(void *addr) +void vfree(void *addr) { - cfs_free(addr); + kfree(addr); } /* - * cfs_mem_cache_create + * kmem_cache_create * To create a SLAB cache * * Arguments: @@ -258,32 +254,26 @@ cfs_free_large(void *addr) * 3, parameters C/D are removed.
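
The Windows slab shim is backed by an NT non-paged lookaside list. The pairing below is a sketch only; the initialization call and its pool tag are assumed, since the part of kmem_cache_create() that sets up kmc->npll is not shown in this hunk:

        /* assumed setup, mirroring kmem_cache_alloc/free/destroy below */
        ExInitializeNPagedLookasideList(&(kmc->npll), NULL, NULL, 0,
                                        size, 'Lufs', 0);
        buf = ExAllocateFromNPagedLookasideList(&(kmc->npll)); /* alloc   */
        ExFreeToNPagedLookasideList(&(kmc->npll), buf);        /* free    */
        ExDeleteNPagedLookasideList(&(kmc->npll));             /* destroy */
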
*/ -cfs_mem_cache_t * -cfs_mem_cache_create( - const char * name, - size_t size, - size_t offset, - unsigned long flags - ) +struct kmem_cache *kmem_cache_create(const char *name, size_t size, + size_t offset, unsigned long flags, + void *ctor) { - cfs_mem_cache_t * kmc = NULL; + struct kmem_cache *kmc = NULL; - /* The name of the SLAB could not exceed 20 chars */ + /* The name of the SLAB must not exceed 20 chars */ - if (name && strlen(name) >= 20) { - goto errorout; - } + if (name && strlen(name) >= 20) + goto errorout; - /* Allocate and initialize the SLAB strcture */ + /* Allocate and initialize the SLAB structure */ - kmc = cfs_alloc (sizeof(cfs_mem_cache_t), 0); + kmc = kmalloc(sizeof(struct kmem_cache), 0); - if (NULL == kmc) { - goto errorout; - } + if (NULL == kmc) + goto errorout; - memset(kmc, 0, sizeof(cfs_mem_cache_t)); - kmc->flags = flags; + memset(kmc, 0, sizeof(struct kmem_cache)); + kmc->flags = flags; if (name) { strcpy(&kmc->name[0], name); @@ -306,7 +296,7 @@ errorout: } /* - * cfs_mem_cache_destroy + * kmem_cache_destroy * To destroy the unused SLAB cache * * Arguments: @@ -320,19 +310,19 @@ errorout: * N/A */ -int cfs_mem_cache_destroy (cfs_mem_cache_t * kmc) +int kmem_cache_destroy(struct kmem_cache *kmc) { - ASSERT(kmc != NULL); + ASSERT(kmc != NULL); - ExDeleteNPagedLookasideList(&(kmc->npll)); + ExDeleteNPagedLookasideList(&(kmc->npll)); - cfs_free(kmc); + kfree(kmc); - return 0; + return 0; } /* - * cfs_mem_cache_alloc + * kmem_cache_alloc * To allocate an object (LookAside entry) from the SLAB * * Arguments: @@ -347,17 +337,17 @@ int cfs_mem_cache_destroy (cfs_mem_cache_t * kmc) * N/A */ -void *cfs_mem_cache_alloc(cfs_mem_cache_t * kmc, int flags) +void *kmem_cache_alloc(struct kmem_cache *kmc, int flags) { - void *buf = NULL; + void *buf = NULL; - buf = ExAllocateFromNPagedLookasideList(&(kmc->npll)); + buf = ExAllocateFromNPagedLookasideList(&(kmc->npll)); - return buf; + return buf; } /* - * cfs_mem_cache_free + * kmem_cache_free * To free an object (LookAside entry) to the SLAB cache * * Arguments: @@ -371,7 +361,7 @@ void *cfs_mem_cache_alloc(cfs_mem_cache_t * kmc, int flags) * N/A */ -void cfs_mem_cache_free(cfs_mem_cache_t * kmc, void * buf) +void kmem_cache_free(struct kmem_cache *kmc, void *buf) { ExFreeToNPagedLookasideList(&(kmc->npll), buf); } @@ -380,10 +370,10 @@ spinlock_t shrinker_guard = {0}; CFS_LIST_HEAD(shrinker_hdr); cfs_timer_t shrinker_timer = {0}; -struct cfs_shrinker * cfs_set_shrinker(int seeks, shrink_callback cb) +struct shrinker *set_shrinker(int seeks, shrink_callback cb) { - struct cfs_shrinker * s = (struct cfs_shrinker *) - cfs_alloc(sizeof(struct cfs_shrinker), CFS_ALLOC_ZERO); + struct shrinker *s = (struct shrinker *) + kmalloc(sizeof(struct shrinker), __GFP_ZERO); if (s) { s->cb = cb; s->seeks = seeks; @@ -396,33 +386,33 @@ struct cfs_shrinker * cfs_set_shrinker(int seeks, shrink_callback cb) return s; } -void cfs_remove_shrinker(struct cfs_shrinker *s) +void remove_shrinker(struct shrinker *s) { - struct cfs_shrinker *tmp; + struct shrinker *tmp; spin_lock(&shrinker_guard); #if TRUE - cfs_list_for_each_entry_typed(tmp, &shrinker_hdr, - struct cfs_shrinker, list) { - if (tmp == s) { - cfs_list_del(&tmp->list); - break; - } - } + cfs_list_for_each_entry_typed(tmp, &shrinker_hdr, + struct shrinker, list) { + if (tmp == s) { + cfs_list_del(&tmp->list); + break; + } + } #else - cfs_list_del(&s->list); + cfs_list_del(&s->list); #endif spin_unlock(&shrinker_guard); - cfs_free(s); + kfree(s); } /* time ut test proc */ void
shrinker_timer_proc(ulong_ptr_t arg) { - struct cfs_shrinker *s; + struct shrinker *s; spin_lock(&shrinker_guard); cfs_list_for_each_entry_typed(s, &shrinker_hdr, - struct cfs_shrinker, list) { + struct shrinker, list) { s->cb(s->nr, __GFP_FS); } spin_unlock(&shrinker_guard); diff --git a/libcfs/libcfs/winnt/winnt-module.c b/libcfs/libcfs/winnt/winnt-module.c index 3a3b3f9..c11fff2 100644 --- a/libcfs/libcfs/winnt/winnt-module.c +++ b/libcfs/libcfs/winnt/winnt-module.c @@ -49,7 +49,7 @@ int libcfs_ioctl_getdata(char *buf, char *end, void *arg) hdr = (struct libcfs_ioctl_hdr *)buf; data = (struct libcfs_ioctl_data *)buf; - err = cfs_copy_from_user(buf, (void *)arg, sizeof(*hdr)); + err = copy_from_user(buf, (void *)arg, sizeof(*hdr)); if (err) RETURN(err); @@ -68,7 +68,7 @@ int libcfs_ioctl_getdata(char *buf, char *end, void *arg) RETURN(-EINVAL); } - err = cfs_copy_from_user(buf, (void *)arg, hdr->ioc_len); + err = copy_from_user(buf, (void *)arg, hdr->ioc_len); if (err) RETURN(err); @@ -89,7 +89,7 @@ int libcfs_ioctl_getdata(char *buf, char *end, void *arg) int libcfs_ioctl_popdata(void *arg, void *data, int size) { - if (cfs_copy_to_user((char *)arg, data, size)) + if (copy_to_user((char *)arg, data, size)) return -EFAULT; return 0; } diff --git a/libcfs/libcfs/winnt/winnt-prim.c b/libcfs/libcfs/winnt/winnt-prim.c index 1d04567..6bedace 100644 --- a/libcfs/libcfs/winnt/winnt-prim.c +++ b/libcfs/libcfs/winnt/winnt-prim.c @@ -59,9 +59,7 @@ */ void -cfs_thread_proc( - void * context - ) +cfs_thread_proc(void *context) { cfs_thread_context_t * thread_context = (cfs_thread_context_t *) context; @@ -74,7 +72,7 @@ cfs_thread_proc( /* Free the context memory */ - cfs_free(context); + kfree(context); /* Terminate this system thread */ @@ -101,11 +99,11 @@ cfs_task_t kthread_run(int (*func)(void *), void *arg, char *name) { cfs_handle_t thread = NULL; NTSTATUS status; - cfs_thread_context_t * context = NULL; + cfs_thread_context_t *context = NULL; /* Allocate the context to be transferred to system thread */ - context = cfs_alloc(sizeof(cfs_thread_context_t), CFS_ALLOC_ZERO); + context = kmalloc(sizeof(cfs_thread_context_t), __GFP_ZERO); if (!context) { return ERR_PTR(-ENOMEM); @@ -126,7 +124,7 @@ cfs_task_t kthread_run(int (*func)(void *), void *arg, char *name) if (!NT_SUCCESS(status)) { - cfs_free(context); + kfree(context); /* We need translate the nt status to linux error code */ @@ -248,10 +246,10 @@ cfs_symbol_register(const char *name, const void *value) struct cfs_symbol *sym = NULL; struct cfs_symbol *new = NULL; - new = cfs_alloc(sizeof(struct cfs_symbol), CFS_ALLOC_ZERO); - if (!new) { - return (-ENOMEM); - } + new = kmalloc(sizeof(struct cfs_symbol), __GFP_ZERO); + if (!new) + return -ENOMEM; + strncpy(new->name, name, CFS_SYMBOL_LEN); new->value = (void *)value; new->ref = 0; @@ -262,7 +260,7 @@ cfs_symbol_register(const char *name, const void *value) sym = cfs_list_entry (walker, struct cfs_symbol, sym_list); if (!strcmp(sym->name, name)) { up_write(&cfs_symbol_lock); - cfs_free(new); + kfree(new); return 0; /* alreay registerred */ } } @@ -299,7 +297,7 @@ cfs_symbol_unregister(const char *name) if (!strcmp(sym->name, name)) { LASSERT(sym->ref == 0); cfs_list_del (&sym->sym_list); - cfs_free(sym); + kfree(sym); break; } } @@ -331,7 +329,7 @@ cfs_symbol_clean() sym = cfs_list_entry (walker, struct cfs_symbol, sym_list); LASSERT(sym->ref == 0); cfs_list_del (&sym->sym_list); - cfs_free(sym); + kfree(sym); } up_write(&cfs_symbol_lock); return; @@ -767,12 +765,12 @@ 
libcfs_arch_init(void) and kernel ntoskrnl.lib) */ cfs_libc_init(); - /* create slab memory caches for page alloctors */ - cfs_page_t_slab = cfs_mem_cache_create( - "CPGT", sizeof(cfs_page_t), 0, 0 ); + /* create slab memory caches for page allocators */ + cfs_page_t_slab = kmem_cache_create("CPGT", sizeof(struct page), + 0, 0, NULL); - cfs_page_p_slab = cfs_mem_cache_create( - "CPGP", CFS_PAGE_SIZE, 0, 0 ); + cfs_page_p_slab = kmem_cache_create("CPGP", PAGE_CACHE_SIZE, + 0, 0, NULL); if ( cfs_page_t_slab == NULL || cfs_page_p_slab == NULL ){ @@ -810,15 +808,13 @@ libcfs_arch_init(void) errorout: - if (rc != 0) { - /* destroy the taskslot cache slab */ - if (cfs_page_t_slab) { - cfs_mem_cache_destroy(cfs_page_t_slab); - } - if (cfs_page_p_slab) { - cfs_mem_cache_destroy(cfs_page_p_slab); - } - } + if (rc != 0) { + /* destroy the taskslot cache slab */ + if (cfs_page_t_slab) + kmem_cache_destroy(cfs_page_t_slab); + if (cfs_page_p_slab) + kmem_cache_destroy(cfs_page_p_slab); + } return rc; } @@ -840,11 +836,11 @@ libcfs_arch_cleanup(void) /* destroy the taskslot cache slab */ if (cfs_page_t_slab) { - cfs_mem_cache_destroy(cfs_page_t_slab); + kmem_cache_destroy(cfs_page_t_slab); } if (cfs_page_p_slab) { - cfs_mem_cache_destroy(cfs_page_p_slab); + kmem_cache_destroy(cfs_page_p_slab); } return; diff --git a/libcfs/libcfs/winnt/winnt-proc.c b/libcfs/libcfs/winnt/winnt-proc.c index 6a58609..770a452 100644 --- a/libcfs/libcfs/winnt/winnt-proc.c +++ b/libcfs/libcfs/winnt/winnt-proc.c @@ -64,7 +64,7 @@ cfs_proc_entry_t * cfs_proc_dev = NULL; /* SLAB object for cfs_proc_entry_t allocation */ -cfs_mem_cache_t * proc_entry_cache = NULL; +struct kmem_cache *proc_entry_cache; /* root node for sysctl table */ cfs_sysctl_table_header_t root_table_header; @@ -98,9 +98,10 @@ proc_file_read(struct file * file, const char * buf, size_t nbytes, loff_t *ppos char *start; cfs_proc_entry_t * dp; - dp = (cfs_proc_entry_t *) file->f_inode->i_priv; - if (!(page = (char*) cfs_alloc(CFS_PAGE_SIZE, 0))) - return -ENOMEM; + dp = (cfs_proc_entry_t *) file->f_inode->i_priv; + page = (char *) kmalloc(PAGE_CACHE_SIZE, 0); + if (page == NULL) + return -ENOMEM; while ((nbytes > 0) && !eof) { @@ -132,7 +133,7 @@ proc_file_read(struct file * file, const char * buf, size_t nbytes, loff_t *ppos break; } - n -= cfs_copy_to_user((void *)buf, start, n); + n -= copy_to_user((void *)buf, start, n); if (n == 0) { if (retval == 0) retval = -EFAULT; @@ -144,9 +145,9 @@ proc_file_read(struct file * file, const char * buf, size_t nbytes, loff_t *ppos buf += n; retval += n; } - cfs_free(page); + kfree(page); - return retval; + return retval; } static ssize_t @@ -181,10 +182,9 @@ proc_alloc_entry() { cfs_proc_entry_t * entry = NULL; - entry = cfs_mem_cache_alloc(proc_entry_cache, 0); - if (!entry) { - return NULL; - } + entry = kmem_cache_alloc(proc_entry_cache, 0); + if (!entry) + return NULL; memset(entry, 0, sizeof(cfs_proc_entry_t)); @@ -199,11 +199,9 @@ proc_alloc_entry() void proc_free_entry(cfs_proc_entry_t * entry) - { - ASSERT(entry->magic == CFS_PROC_ENTRY_MAGIC); - - cfs_mem_cache_free(proc_entry_cache, entry); + ASSERT(entry->magic == CFS_PROC_ENTRY_MAGIC); + kmem_cache_free(proc_entry_cache, entry); } /* dissect the path string for a given full proc path */ @@ -413,11 +411,10 @@ proc_search_entry( parent = root; entry = NULL; - ename = cfs_alloc(0x21, CFS_ALLOC_ZERO); + ename = kmalloc(0x21, __GFP_ZERO); - if (ename == NULL) { - goto errorout; - } + if (ename == NULL) + goto errorout; again: @@ -452,7 +449,7 @@ again: errorout: if
(ename) { - cfs_free(ename); + kfree(ename); } return entry; @@ -494,12 +491,10 @@ again: entry = proc_alloc_entry(); memcpy(entry->name, ename, flen); - if (entry) { - if(!proc_insert_splay(parent, entry)) { - proc_free_entry(entry); - entry = NULL; - } - } + if (entry && !proc_insert_splay(parent, entry)) { + proc_free_entry(entry); + entry = NULL; + } } if (!entry) { @@ -711,17 +706,15 @@ void proc_destory_subtree(cfs_proc_entry_t *entry) void proc_destroy_fs() { - LOCK_PROCFS(); + LOCK_PROCFS(); - if (cfs_proc_root) { - proc_destroy_splay(cfs_proc_root); - } + if (cfs_proc_root) + proc_destroy_splay(cfs_proc_root); - if (proc_entry_cache) { - cfs_mem_cache_destroy(proc_entry_cache); - } + if (proc_entry_cache) + kmem_cache_destroy(proc_entry_cache); - UNLOCK_PROCFS(); + UNLOCK_PROCFS(); } static char proc_item_path[512]; @@ -797,12 +790,8 @@ int proc_init_fs() CFS_INIT_LIST_HEAD(&(root_table_header.ctl_entry)); INIT_PROCFS_LOCK(); - proc_entry_cache = cfs_mem_cache_create( - NULL, - sizeof(cfs_proc_entry_t), - 0, - 0 - ); + proc_entry_cache = kmem_cache_create(NULL, sizeof(cfs_proc_entry_t), + 0, 0, NULL); if (!proc_entry_cache) { return (-ENOMEM); @@ -999,26 +988,27 @@ int sysctl_string(cfs_sysctl_table_t *table, int *name, int nlen, return -ENOTDIR; if (oldval && oldlenp) { - if(get_user(len, oldlenp)) + if (get_user(len, oldlenp)) return -EFAULT; - if (len) { - l = strlen(table->data); - if (len > l) len = l; - if (len >= table->maxlen) - len = table->maxlen; - if(cfs_copy_to_user(oldval, table->data, len)) - return -EFAULT; - if(put_user(0, ((char *) oldval) + len)) - return -EFAULT; - if(put_user(len, oldlenp)) - return -EFAULT; - } + if (len) { + l = strlen(table->data); + if (len > l) + len = l; + if (len >= table->maxlen) + len = table->maxlen; + if (copy_to_user(oldval, table->data, len)) + return -EFAULT; + if (put_user(0, ((char *) oldval) + len)) + return -EFAULT; + if (put_user(len, oldlenp)) + return -EFAULT; + } } if (newval && newlen) { len = newlen; if (len > table->maxlen) len = table->maxlen; - if(cfs_copy_from_user(table->data, newval, len)) + if (copy_from_user(table->data, newval, len)) return -EFAULT; if (len == table->maxlen) len--; @@ -1088,12 +1078,12 @@ static int do_proc_dointvec(cfs_sysctl_table_t *table, int write, struct file *f if (write) { while (left) { char c; - if(get_user(c,(char *) buffer)) - return -EFAULT; + if (get_user(c, (char *)buffer)) + return -EFAULT; if (!isspace(c)) - break; + break; left--; - ((char *) buffer)++; + ((char *)buffer)++; } if (!left) break; @@ -1101,7 +1091,7 @@ static int do_proc_dointvec(cfs_sysctl_table_t *table, int write, struct file *f len = left; if (len > TMPBUFLEN-1) len = TMPBUFLEN-1; - if(cfs_copy_from_user(buf, buffer, len)) + if (copy_from_user(buf, buffer, len)) return -EFAULT; buf[len] = 0; p = buf; @@ -1119,17 +1109,25 @@ static int do_proc_dointvec(cfs_sysctl_table_t *table, int write, struct file *f val = -val; (char *)buffer += len; left -= len; - switch(op) { - case OP_SET: *i = val; break; - case OP_AND: *i &= val; break; - case OP_OR: *i |= val; break; - case OP_MAX: if(*i < val) - *i = val; - break; - case OP_MIN: if(*i > val) - *i = val; - break; - } + switch(op) { + case OP_SET: + *i = val; + break; + case OP_AND: + *i &= val; + break; + case OP_OR: + *i |= val; + break; + case OP_MAX: + if (*i < val) + *i = val; + break; + case OP_MIN: + if (*i > val) + *i = val; + break; + } } else { p = buf; if (!first) @@ -1138,7 +1136,7 @@ static int do_proc_dointvec(cfs_sysctl_table_t *table, int write, 
struct file *f len = strlen(buf); if (len > left) len = left; - if(cfs_copy_to_user(buffer, buf, len)) + if (copy_to_user(buffer, buf, len)) return -EFAULT; left -= len; (char *)buffer += len; @@ -1146,7 +1144,7 @@ static int do_proc_dointvec(cfs_sysctl_table_t *table, int write, struct file *f } if (!write && !first && left) { - if(put_user('\n', (char *) buffer)) + if (put_user('\n', (char *) buffer)) return -EFAULT; left--, ((char *)buffer)++; } @@ -1154,7 +1152,7 @@ static int do_proc_dointvec(cfs_sysctl_table_t *table, int write, struct file *f p = (char *) buffer; while (left) { char c; - if(get_user(c, p++)) + if (get_user(c, p++)) return -EFAULT; if (!isspace(c)) break; @@ -1222,7 +1220,7 @@ int proc_dostring(cfs_sysctl_table_t *table, int write, struct file *filp, len = 0; p = buffer; while (len < *lenp) { - if(get_user(c, p++)) + if (get_user(c, p++)) return -EFAULT; if (c == 0 || c == '\n') break; @@ -1230,7 +1228,7 @@ int proc_dostring(cfs_sysctl_table_t *table, int write, struct file *filp, } if (len >= (size_t)table->maxlen) len = (size_t)table->maxlen-1; - if(cfs_copy_from_user(table->data, buffer, len)) + if (copy_from_user(table->data, buffer, len)) return -EFAULT; ((char *) table->data)[len] = 0; filp->f_pos += *lenp; @@ -1241,10 +1239,10 @@ int proc_dostring(cfs_sysctl_table_t *table, int write, struct file *filp, if (len > *lenp) len = *lenp; if (len) - if(cfs_copy_to_user(buffer, table->data, len)) + if (copy_to_user(buffer, table->data, len)) return -EFAULT; if (len < *lenp) { - if(put_user('\n', ((char *) buffer) + len)) + if (put_user('\n', ((char *) buffer) + len)) return -EFAULT; len++; } @@ -1285,9 +1283,9 @@ int do_sysctl_strategy (cfs_sysctl_table_t *table, if (len) { if (len > (size_t)table->maxlen) len = (size_t)table->maxlen; - if(cfs_copy_to_user(oldval, table->data, len)) + if (copy_to_user(oldval, table->data, len)) return -EFAULT; - if(put_user(len, oldlenp)) + if (put_user(len, oldlenp)) return -EFAULT; } } @@ -1295,7 +1293,7 @@ int do_sysctl_strategy (cfs_sysctl_table_t *table, len = newlen; if (len > (size_t)table->maxlen) len = (size_t)table->maxlen; - if(cfs_copy_from_user(table->data, newval, len)) + if (copy_from_user(table->data, newval, len)) return -EFAULT; } } @@ -1366,7 +1364,7 @@ int do_sysctl(int *name, int nlen, void *oldval, size_t *oldlenp, newval, newlen, head->ctl_table, &context); if (context) - cfs_free(context); + kfree(context); if (error != -ENOTDIR) return error; tmp = tmp->next; @@ -1447,7 +1445,7 @@ struct ctl_table_header *register_sysctl_table(cfs_sysctl_table_t * table, int insert_at_head) { struct ctl_table_header *tmp; - tmp = cfs_alloc(sizeof(struct ctl_table_header), 0); + tmp = kmalloc(sizeof(struct ctl_table_header), 0); if (!tmp) return NULL; tmp->ctl_table = table; @@ -1476,7 +1474,7 @@ void unregister_sysctl_table(struct ctl_table_header * header) #ifdef CONFIG_PROC_FS unregister_proc_table(header->ctl_table, cfs_proc_sys); #endif - cfs_free(header); + kfree(header); } @@ -1658,13 +1656,13 @@ lustre_open_file(char *filename) if (fp == NULL) return NULL; - fh = cfs_alloc(sizeof(*fh), CFS_ALLOC_ZERO); + fh = kmalloc(sizeof(*fh), __GFP_ZERO); if (fh == NULL) return NULL; - fh->f_inode = cfs_alloc(sizeof(struct inode), CFS_ALLOC_ZERO); + fh->f_inode = kmalloc(sizeof(struct inode), __GFP_ZERO); if (!fh->f_inode) { - cfs_free(fh); + kfree(fh); return NULL; } @@ -1678,8 +1676,8 @@ lustre_open_file(char *filename) } if (0 != rc) { - cfs_free(fh->f_inode); - cfs_free(fh); + kfree(fh->f_inode); + kfree(fh); return NULL; } @@ 
-1699,8 +1697,8 @@ lustre_close_file(struct file *fh) fp->nlink--; } - cfs_free(fh->f_inode); - cfs_free(fh); + kfree(fh->f_inode); + kfree(fh); return rc; } @@ -1877,7 +1875,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) /* if not empty - flush it first */ if (m->count) { n = min(m->count, size); - err = cfs_copy_to_user(buf, m->buf + m->from, n); + err = copy_to_user(buf, m->buf + m->from, n); if (err) goto Efault; m->count -= n; @@ -1903,7 +1901,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) if (m->count < m->size) goto Fill; m->op->stop(m, p); - cfs_free(m->buf); + kfree(m->buf); m->buf = kmalloc(m->size <<= 1, GFP_KERNEL); if (!m->buf) goto Enomem; @@ -1932,7 +1930,7 @@ Fill: } m->op->stop(m, p); n = min(m->count, size); - err = cfs_copy_to_user(buf, m->buf, n); + err = copy_to_user(buf, m->buf, n); if (err) goto Efault; copied += n; @@ -2007,8 +2005,8 @@ static int traverse(struct seq_file *m, loff_t offset) Eoverflow: m->op->stop(m, p); - cfs_free(m->buf); - m->buf = cfs_alloc(m->size <<= 1, CFS_ALLOC_KERNEL | CFS_ALLOC_ZERO); + kfree(m->buf); + m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | __GFP_ZERO); return !m->buf ? -ENOMEM : -EAGAIN; } @@ -2067,8 +2065,8 @@ int seq_release(struct inode *inode, struct file *file) struct seq_file *m = (struct seq_file *)file->private_data; if (m) { if (m->buf) - cfs_free(m->buf); - cfs_free(m); + kfree(m->buf); + kfree(m); } return 0; } @@ -2195,7 +2193,7 @@ int single_open(struct file *file, int (*show)(struct seq_file *, void *), if (!res) ((struct seq_file *)file->private_data)->private = data; else - cfs_free(op); + kfree(op); } return res; } @@ -2205,7 +2203,7 @@ int single_release(struct inode *inode, struct file *file) { const struct seq_operations *op = ((struct seq_file *)file->private_data)->op; int res = seq_release(inode, file); - cfs_free((void *)op); + kfree((void *)op); return res; } EXPORT_SYMBOL(single_release); @@ -2214,7 +2212,7 @@ int seq_release_private(struct inode *inode, struct file *file) { struct seq_file *seq = file->private_data; - cfs_free(seq->private); + kfree(seq->private); seq->private = NULL; return seq_release(inode, file); } @@ -2227,7 +2225,7 @@ void *__seq_open_private(struct file *f, const struct seq_operations *ops, void *private; struct seq_file *seq; - private = cfs_alloc(psize, CFS_ALLOC_KERNEL | CFS_ALLOC_ZERO); + private = kmalloc(psize, GFP_KERNEL | __GFP_ZERO); if (private == NULL) goto out; @@ -2240,7 +2238,7 @@ void *__seq_open_private(struct file *f, const struct seq_operations *ops, return private; out_free: - cfs_free(private); + kfree(private); out: return NULL; } diff --git a/libcfs/libcfs/winnt/winnt-tcpip.c b/libcfs/libcfs/winnt/winnt-tcpip.c index 50d784f..04f9b66 100644 --- a/libcfs/libcfs/winnt/winnt-tcpip.c +++ b/libcfs/libcfs/winnt/winnt-tcpip.c @@ -360,7 +360,7 @@ KsAllocateKsTsdu() } else { - KsTsdu = (PKS_TSDU) cfs_mem_cache_alloc( + KsTsdu = (PKS_TSDU) kmem_cache_alloc( ks_data.ksnd_tsdu_slab, 0); } @@ -393,7 +393,7 @@ KsFreeKsTsdu( PKS_TSDU KsTsdu ) { - cfs_mem_cache_free( + kmem_cache_free( ks_data.ksnd_tsdu_slab, KsTsdu ); } @@ -3035,7 +3035,7 @@ KsCleanupIpAddresses() list = RemoveHeadList(&ks_data.ksnd_addrs_list); slot = CONTAINING_RECORD(list, ks_addr_slot_t, link); - cfs_free(slot); + kfree(slot); ks_data.ksnd_naddrs--; } @@ -3081,7 +3081,7 @@ KsAddAddressHandler( return; } - slot = cfs_alloc(sizeof(ks_addr_slot_t) + DeviceName->Length, CFS_ALLOC_ZERO); + slot = kmalloc(sizeof(ks_addr_slot_t) + 
DeviceName->Length, __GFP_ZERO); if (slot != NULL) { spin_lock(&ks_data.ksnd_addrs_lock); InsertTailList(&ks_data.ksnd_addrs_list, &slot->link); @@ -3574,7 +3574,7 @@ KsTcpReceiveCompletionRoutine( /* free the Context structure... */ ASSERT(Context->Magic == KS_TCP_CONTEXT_MAGIC); Context->Magic = 'CDAB'; - cfs_free(Context); + kfree(Context); } /* free the Irp */ @@ -3745,7 +3745,7 @@ KsTcpSendCompletionRoutine( if (context) { ASSERT(context->Magic == KS_TCP_CONTEXT_MAGIC); context->Magic = 'CDAB'; - cfs_free(context); + kfree(context); } /* free the Irp structure */ @@ -3854,7 +3854,7 @@ KsTcpReceiveEventHandler( /* there's still data in tdi internal queue, we need issue a new Irp to receive all of them. first allocate the tcp context */ - context = cfs_alloc(sizeof(KS_TCP_COMPLETION_CONTEXT), 0); + context = kmalloc(sizeof(KS_TCP_COMPLETION_CONTEXT), 0); if (!context) { status = STATUS_INSUFFICIENT_RESOURCES; goto errorout; @@ -3939,7 +3939,7 @@ errorout: if (context) { ASSERT(context->Magic == KS_TCP_CONTEXT_MAGIC); context->Magic = 'CDAB'; - cfs_free(context); + kfree(context); } ks_abort_tconn(tconn); @@ -4305,8 +4305,8 @@ ks_create_tconn() ks_tconn_t * tconn = NULL; /* allocate ksoc_tconn_t from the slab cache memory */ - tconn = (ks_tconn_t *)cfs_mem_cache_alloc( - ks_data.ksnd_tconn_slab, CFS_ALLOC_ZERO); + tconn = (ks_tconn_t *)kmem_cache_alloc( + ks_data.ksnd_tconn_slab, __GFP_ZERO); if (tconn) { @@ -4384,7 +4384,7 @@ ks_free_tconn(ks_tconn_t * tconn) spin_unlock(&(ks_data.ksnd_tconn_lock)); /* free the structure memory */ - cfs_mem_cache_free(ks_data.ksnd_tconn_slab, tconn); + kmem_cache_free(ks_data.ksnd_tconn_slab, tconn); KsPrint((3, "ks_free_tconn: tconn %p is freed.\n", tconn)); } @@ -5645,7 +5645,7 @@ KsBuildSend(ks_tconn_t * tconn, PKS_TSDUMGR TsduMgr, length = KsQueryMdlsSize(mdl); /* we need allocate the ks_tx_t structure from memory pool. */ - context = cfs_alloc(sizeof(ks_tdi_tx_t), 0); + context = kmalloc(sizeof(ks_tdi_tx_t), 0); if (!context) { status = STATUS_INSUFFICIENT_RESOURCES; goto errorout; @@ -5696,7 +5696,7 @@ errorout: if (context) { ASSERT(context->Magic == KS_TCP_CONTEXT_MAGIC); context->Magic = 'CDAB'; - cfs_free(context); + kfree(context); } /* here need free the Irp. 
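[Editor's note: the conversions above map the old CFS_ALLOC_* flags onto kernel GFP flags: CFS_ALLOC_ZERO becomes __GFP_ZERO, CFS_ALLOC_ATOMIC becomes GFP_ATOMIC, CFS_ALLOC_STD becomes GFP_IOFS, and CFS_ALLOC_KERNEL becomes GFP_KERNEL. A minimal sketch of the resulting idiom; the "ctx" type and helper are illustrative only:

    #include <linux/slab.h>

    struct ctx {
            int c_id;
    };

    static struct ctx *ctx_alloc(int in_atomic_path)
    {
            if (in_atomic_path)
                    /* must not sleep: GFP_ATOMIC replaces CFS_ALLOC_ATOMIC */
                    return kmalloc(sizeof(struct ctx), GFP_ATOMIC);

            /* __GFP_ZERO replaces CFS_ALLOC_ZERO; kzalloc() is equivalent */
            return kmalloc(sizeof(struct ctx), GFP_KERNEL | __GFP_ZERO);
    }
]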
*/ @@ -5865,8 +5865,8 @@ ks_init_tdi_data() CFS_INIT_LIST_HEAD(&ks_data.ksnd_tconns); cfs_init_event(&ks_data.ksnd_tconn_exit, TRUE, FALSE); - ks_data.ksnd_tconn_slab = cfs_mem_cache_create( - "tcon", sizeof(ks_tconn_t) , 0, 0); + ks_data.ksnd_tconn_slab = kmem_cache_create("tcon", sizeof(ks_tconn_t), + 0, 0, NULL); if (!ks_data.ksnd_tconn_slab) { rc = -ENOMEM; @@ -5877,8 +5877,8 @@ ks_init_tdi_data() spin_lock_init(&ks_data.ksnd_tsdu_lock); CFS_INIT_LIST_HEAD(&ks_data.ksnd_freetsdus); ks_data.ksnd_tsdu_size = TDINAL_TSDU_DEFAULT_SIZE; /* 64k */ - ks_data.ksnd_tsdu_slab = cfs_mem_cache_create( - "tsdu", ks_data.ksnd_tsdu_size, 0, 0); + ks_data.ksnd_tsdu_slab = kmem_cache_create("tsdu", ks_data.ksnd_tsdu_size, + 0, 0, NULL); if (!ks_data.ksnd_tsdu_slab) { rc = -ENOMEM; @@ -5890,8 +5890,8 @@ ks_init_tdi_data() if (ks_data.ksnd_engine_nums < 4) { ks_data.ksnd_engine_nums = 4; } - ks_data.ksnd_engine_mgr = cfs_alloc(sizeof(ks_engine_mgr_t) * - ks_data.ksnd_engine_nums,CFS_ALLOC_ZERO); + ks_data.ksnd_engine_mgr = kmalloc(sizeof(ks_engine_mgr_t) * + ks_data.ksnd_engine_nums, __GFP_ZERO); if (ks_data.ksnd_engine_mgr == NULL) { rc = -ENOMEM; goto errorout; @@ -5912,7 +5912,7 @@ errorout: /* do cleanup in case we get failures */ if (rc < 0) { if (ks_data.ksnd_tconn_slab) { - cfs_mem_cache_destroy(ks_data.ksnd_tconn_slab); + kmem_cache_destroy(ks_data.ksnd_tconn_slab); ks_data.ksnd_tconn_slab = NULL; } } @@ -5967,7 +5967,7 @@ ks_fini_tdi_data() cfs_wait_event_internal(&ks_data.ksnd_tconn_exit, 0); /* it's safe to delete the tconn slab ... */ - cfs_mem_cache_destroy(ks_data.ksnd_tconn_slab); + kmem_cache_destroy(ks_data.ksnd_tconn_slab); ks_data.ksnd_tconn_slab = NULL; /* clean up all the tsud buffers in the free list */ @@ -5975,14 +5975,14 @@ ks_fini_tdi_data() cfs_list_for_each (list, &ks_data.ksnd_freetsdus) { KsTsdu = cfs_list_entry (list, KS_TSDU, Link); - cfs_mem_cache_free( + kmem_cache_free( ks_data.ksnd_tsdu_slab, KsTsdu ); } spin_unlock(&(ks_data.ksnd_tsdu_lock)); /* it's safe to delete the tsdu slab ... */ - cfs_mem_cache_destroy(ks_data.ksnd_tsdu_slab); + kmem_cache_destroy(ks_data.ksnd_tsdu_slab); ks_data.ksnd_tsdu_slab = NULL; /* good!
it's smooth to do the cleaning up...*/ @@ -6554,7 +6554,7 @@ int libcfs_ipif_enumerate(char ***names) spin_lock(&ks_data.ksnd_addrs_lock); - *names = cfs_alloc(sizeof(char *) * ks_data.ksnd_naddrs, CFS_ALLOC_ZERO); + *names = kmalloc(sizeof(char *) * ks_data.ksnd_naddrs, __GFP_ZERO); if (*names == NULL) { goto errorout; } @@ -6578,7 +6578,7 @@ errorout: void libcfs_ipif_free_enumeration(char **names, int n) { if (names) { - cfs_free(names); + kfree(names); } } diff --git a/libcfs/libcfs/winnt/winnt-tracefile.c b/libcfs/libcfs/winnt/winnt-tracefile.c index cbfe862..9e34cd3 100644 --- a/libcfs/libcfs/winnt/winnt-tracefile.c +++ b/libcfs/libcfs/winnt/winnt-tracefile.c @@ -62,8 +62,8 @@ int cfs_tracefile_init_arch() memset(cfs_trace_data, 0, sizeof(cfs_trace_data)); for (i = 0; i < CFS_TCD_TYPE_MAX; i++) { cfs_trace_data[i] = - cfs_alloc(sizeof(union cfs_trace_data_union) * \ - CFS_NR_CPUS, CFS_ALLOC_KERNEL); + kmalloc(sizeof(union cfs_trace_data_union) * \ + CFS_NR_CPUS, GFP_KERNEL); if (cfs_trace_data[i] == NULL) goto out; } @@ -78,8 +78,8 @@ int cfs_tracefile_init_arch() for (i = 0; i < cfs_num_possible_cpus(); i++) for (j = 0; j < CFS_TCD_TYPE_MAX; j++) { cfs_trace_console_buffers[i][j] = - cfs_alloc(CFS_TRACE_CONSOLE_BUFFER_SIZE, - CFS_ALLOC_KERNEL); + kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE, + GFP_KERNEL); if (cfs_trace_console_buffers[i][j] == NULL) goto out; @@ -102,14 +102,14 @@ void cfs_tracefile_fini_arch() for (i = 0; i < cfs_num_possible_cpus(); i++) { for (j = 0; j < CFS_TCD_TYPE_MAX; j++) { if (cfs_trace_console_buffers[i][j] != NULL) { - cfs_free(cfs_trace_console_buffers[i][j]); + kfree(cfs_trace_console_buffers[i][j]); cfs_trace_console_buffers[i][j] = NULL; } } } for (i = 0; cfs_trace_data[i] != NULL; i++) { - cfs_free(cfs_trace_data[i]); + kfree(cfs_trace_data[i]); cfs_trace_data[i] = NULL; } @@ -217,7 +217,7 @@ void cfs_print_to_console(struct ptldebug_header *hdr, int mask, int cfs_trace_max_debug_mb(void) { - int total_mb = (cfs_num_physpages >> (20 - CFS_PAGE_SHIFT)); + int total_mb = (num_physpages >> (20 - PAGE_CACHE_SHIFT)); return MAX(512, (total_mb * 80)/100); } diff --git a/libcfs/libcfs/winnt/winnt-usr.c b/libcfs/libcfs/winnt/winnt-usr.c index a0f630c..c5f2312 100644 --- a/libcfs/libcfs/winnt/winnt-usr.c +++ b/libcfs/libcfs/winnt/winnt-usr.c @@ -1020,7 +1020,7 @@ void* pgalloc(size_t factor) { LPVOID page; - page = VirtualAlloc(NULL, CFS_PAGE_SIZE << factor, + page = VirtualAlloc(NULL, PAGE_CACHE_SIZE << factor, MEM_COMMIT, PAGE_READWRITE); return page; } diff --git a/libcfs/libcfs/winnt/winnt-utils.c b/libcfs/libcfs/winnt/winnt-utils.c index ece517c..5f84d6c 100644 --- a/libcfs/libcfs/winnt/winnt-utils.c +++ b/libcfs/libcfs/winnt/winnt-utils.c @@ -114,7 +114,7 @@ static int idr_pre_get(struct idr_context *idp) while (idp->id_free_cnt < IDR_FREE_MAX) { struct idr_layer *new; - new = cfs_alloc(sizeof(struct idr_layer), CFS_ALLOC_ZERO); + new = kmalloc(sizeof(struct idr_layer), __GFP_ZERO); if(new == NULL) return (0); free_layer(idp, new); @@ -326,7 +326,7 @@ static int _idr_remove(struct idr_context *idp, int id) } while (idp->id_free_cnt >= IDR_FREE_MAX) { p = alloc_layer(idp); - cfs_free(p); + kfree(p); } return 0; } @@ -341,7 +341,7 @@ static int _idr_remove(struct idr_context *idp, int id) struct idr_context *cfs_idr_init() { struct idr_context * idp = NULL; - idp = cfs_alloc(sizeof(struct idr_context), 0); + idp = kmalloc(sizeof(struct idr_context), 0); if (idp) { memset(idp, 0, sizeof(struct idr_context)); } @@ -404,7 +404,7 @@ void *cfs_idr_find(struct 
idr_context *idp, int id) void cfs_idr_exit(struct idr_context *idp) { if (idp) { - cfs_free(idp); + kfree(idp); } } diff --git a/lnet/include/lnet/darwin/lib-types.h b/lnet/include/lnet/darwin/lib-types.h index 562ca06..5dd29a3 100644 --- a/lnet/include/lnet/darwin/lib-types.h +++ b/lnet/include/lnet/darwin/lib-types.h @@ -46,7 +46,7 @@ /* * XXX Liang: * - * Temporary fix, because lnet_me_free()->cfs_free->FREE() can be blocked in xnu, + * Temporary fix, because lnet_me_free()->kfree->FREE() can be blocked in xnu, * at then same time we've taken LNET_LOCK(), which is a spinlock. * by using LNET_USE_LIB_FREELIST, we can avoid calling of FREE(). * diff --git a/lnet/include/lnet/types.h b/lnet/include/lnet/types.h index eb82bb0..d6cd5ea 100644 --- a/lnet/include/lnet/types.h +++ b/lnet/include/lnet/types.h @@ -326,16 +326,16 @@ typedef struct iovec lnet_md_iovec_t; * A page-based fragment of a MD. */ typedef struct { - /** Pointer to the page where the fragment resides */ - cfs_page_t *kiov_page; - /** Length in bytes of the fragment */ - unsigned int kiov_len; - /** - * Starting offset of the fragment within the page. Note that the - * end of the fragment must not pass the end of the page; i.e., - * kiov_len + kiov_offset <= CFS_PAGE_SIZE. - */ - unsigned int kiov_offset; + /** Pointer to the page where the fragment resides */ + struct page *kiov_page; + /** Length in bytes of the fragment */ + unsigned int kiov_len; + /** + * Starting offset of the fragment within the page. Note that the + * end of the fragment must not pass the end of the page; i.e., + * kiov_len + kiov_offset <= PAGE_CACHE_SIZE. + */ + unsigned int kiov_offset; } lnet_kiov_t; /** @} lnet_md */ diff --git a/lnet/klnds/o2iblnd/o2iblnd.c b/lnet/klnds/o2iblnd/o2iblnd.c index 86298dd..adf9a44 100644 --- a/lnet/klnds/o2iblnd/o2iblnd.c +++ b/lnet/klnds/o2iblnd/o2iblnd.c @@ -1188,7 +1188,7 @@ kiblnd_free_pages(kib_pages_t *p) for (i = 0; i < npages; i++) { if (p->ibp_pages[i] != NULL) - cfs_free_page(p->ibp_pages[i]); + __free_page(p->ibp_pages[i]); } LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages])); @@ -1212,7 +1212,7 @@ kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages) for (i = 0; i < npages; i++) { p->ibp_pages[i] = cfs_page_cpt_alloc(lnet_cpt_table(), cpt, - CFS_ALLOC_IO); + __GFP_IO); if (p->ibp_pages[i] == NULL) { CERROR("Can't allocate page %d of %d\n", i, npages); kiblnd_free_pages(p); diff --git a/lnet/klnds/ptllnd/ptllnd.c b/lnet/klnds/ptllnd/ptllnd.c index 2edf87d..62c3f56 100644 --- a/lnet/klnds/ptllnd/ptllnd.c +++ b/lnet/klnds/ptllnd/ptllnd.c @@ -618,7 +618,7 @@ kptllnd_base_shutdown (void) LASSERT (cfs_list_empty(&kptllnd_data.kptl_idle_txs)); if (kptllnd_data.kptl_rx_cache != NULL) - cfs_mem_cache_destroy(kptllnd_data.kptl_rx_cache); + kmem_cache_destroy(kptllnd_data.kptl_rx_cache); if (kptllnd_data.kptl_peers != NULL) LIBCFS_FREE(kptllnd_data.kptl_peers, @@ -797,7 +797,7 @@ kptllnd_base_startup (void) kptllnd_rx_buffer_pool_init(&kptllnd_data.kptl_rx_buffer_pool); kptllnd_data.kptl_rx_cache = - cfs_mem_cache_create("ptllnd_rx", + kmem_cache_create("ptllnd_rx", sizeof(kptl_rx_t) + *kptllnd_tunables.kptl_max_msg_size, 0, /* offset */ diff --git a/lnet/klnds/ptllnd/ptllnd.h b/lnet/klnds/ptllnd/ptllnd.h index 6f511d5..e6fd99b 100644 --- a/lnet/klnds/ptllnd/ptllnd.h +++ b/lnet/klnds/ptllnd/ptllnd.h @@ -270,7 +270,7 @@ struct kptl_data cfs_waitq_t kptl_watchdog_waitq; /* watchdog sleeps here */ kptl_rx_buffer_pool_t kptl_rx_buffer_pool; /* rx buffer pool */ - cfs_mem_cache_t* kptl_rx_cache; /* rx 
descripter cache */ + struct kmem_cache *kptl_rx_cache; /* rx descriptor cache */ cfs_atomic_t kptl_ntx; /* # tx descs allocated */ spinlock_t kptl_tx_lock; /* serialise idle tx list*/ diff --git a/lnet/klnds/ptllnd/ptllnd_rx_buf.c b/lnet/klnds/ptllnd/ptllnd_rx_buf.c index 39ef81f..094326c 100644 --- a/lnet/klnds/ptllnd/ptllnd_rx_buf.c +++ b/lnet/klnds/ptllnd/ptllnd_rx_buf.c @@ -342,7 +342,7 @@ kptllnd_rx_alloc(void) return NULL; } - rx = cfs_mem_cache_alloc(kptllnd_data.kptl_rx_cache, CFS_ALLOC_ATOMIC); + rx = kmem_cache_alloc(kptllnd_data.kptl_rx_cache, GFP_ATOMIC); if (rx == NULL) { CERROR("Failed to allocate rx\n"); return NULL; @@ -390,7 +390,7 @@ kptllnd_rx_done(kptl_rx_t *rx, int post_credit) kptllnd_peer_decref(peer); } - cfs_mem_cache_free(kptllnd_data.kptl_rx_cache, rx); + kmem_cache_free(kptllnd_data.kptl_rx_cache, rx); } void diff --git a/lnet/klnds/socklnd/socklnd_lib-darwin.c b/lnet/klnds/socklnd/socklnd_lib-darwin.c index 9148413..ff50818 100644 --- a/lnet/klnds/socklnd/socklnd_lib-darwin.c +++ b/lnet/klnds/socklnd/socklnd_lib-darwin.c @@ -264,7 +264,7 @@ ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) int i; for (nob = i = 0; i < niov; i++) { - scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) + + scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset; nob += scratchiov[i].iov_len = kiov[i].kiov_len; } @@ -276,7 +276,7 @@ ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) */ rc = -sock_send(sock, &msg, MSG_DONTWAIT, &sndlen); for (i = 0; i < niov; i++) - cfs_kunmap(kiov[i].kiov_page); + kunmap(kiov[i].kiov_page); if (rc == 0) rc = sndlen; return rc; @@ -351,14 +351,14 @@ ksocknal_lib_recv_kiov (ksock_conn_t *conn) /* NB we can't trust socket ops to either consume our iovs * or leave them alone.
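[Editor's note: the socklnd and lib-move hunks in this area replace cfs_kmap()/cfs_kunmap() with the kernel's kmap()/kunmap(). The invariant is unchanged: every page mapped must be unmapped on every exit path. A minimal sketch of the pairing; the helper below is illustrative and not part of this patch:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Copy len bytes out of a (possibly highmem) page at offset.
     * kmap() may sleep, so this cannot run in atomic context
     * (kmap_atomic() would be needed there). */
    static void copy_from_page(struct page *pg, unsigned int offset,
                               void *dst, size_t len)
    {
            char *addr = kmap(pg);

            memcpy(dst, addr + offset, len);
            kunmap(pg);
    }
]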
*/ for (nob = i = 0; i < niov; i++) { - scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) + \ + scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + \ kiov[i].kiov_offset; nob += scratchiov[i].iov_len = kiov[i].kiov_len; } LASSERT (nob <= conn->ksnc_rx_nob_wanted); rc = -sock_receive(C2B_SOCK(conn->ksnc_sock), &msg, MSG_DONTWAIT, &rcvlen); for (i = 0; i < niov; i++) - cfs_kunmap(kiov[i].kiov_page); + kunmap(kiov[i].kiov_page); if (rc == 0) rc = rcvlen; return (rc); @@ -609,7 +609,7 @@ ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) CFS_DECL_NET_DATA; for (nob = i = 0; i < niov; i++) { - scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) + + scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset; nob += scratchiov[i].iov_len = kiov[i].kiov_len; } @@ -620,7 +620,7 @@ ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) CFS_NET_EX; for (i = 0; i < niov; i++) - cfs_kunmap(kiov[i].kiov_page); + kunmap(kiov[i].kiov_page); if (rc != 0) { if (suio.uio_resid != nob &&\ @@ -800,7 +800,7 @@ ksocknal_lib_recv_kiov (ksock_conn_t *conn) CFS_DECL_NET_DATA; for (nob = i = 0; i < niov; i++) { - scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) + kiov[i].kiov_offset; + scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset; nob += scratchiov[i].iov_len = kiov[i].kiov_len; } LASSERT (nob <= conn->ksnc_rx_nob_wanted); @@ -812,7 +812,7 @@ ksocknal_lib_recv_kiov (ksock_conn_t *conn) CFS_NET_EX; for (i = 0; i < niov; i++) - cfs_kunmap(kiov[i].kiov_page); + kunmap(kiov[i].kiov_page); if (rc){ if (ruio.uio_resid != nob && \ diff --git a/lnet/klnds/socklnd/socklnd_lib-linux.c b/lnet/klnds/socklnd/socklnd_lib-linux.c index 805af17..1dff915 100644 --- a/lnet/klnds/socklnd/socklnd_lib-linux.c +++ b/lnet/klnds/socklnd/socklnd_lib-linux.c @@ -688,7 +688,8 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov, for (nob = i = 0; i < niov; i++) { if ((kiov[i].kiov_offset != 0 && i > 0) || - (kiov[i].kiov_offset + kiov[i].kiov_len != CFS_PAGE_SIZE && i < niov - 1)) + (kiov[i].kiov_offset + kiov[i].kiov_len != + PAGE_CACHE_SIZE && i < niov - 1)) return NULL; pages[i] = kiov[i].kiov_page; diff --git a/lnet/lnet/api-ni.c b/lnet/lnet/api-ni.c index 6aa6f9e..1df7cca 100644 --- a/lnet/lnet/api-ni.c +++ b/lnet/lnet/api-ni.c @@ -2107,12 +2107,8 @@ lnet_ping (lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_i for (i = 0; i < n_ids; i++) { tmpid.pid = info->pi_pid; tmpid.nid = info->pi_ni[i].ns_nid; -#ifdef __KERNEL__ - if (cfs_copy_to_user(&ids[i], &tmpid, sizeof(tmpid))) + if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid))) goto out_1; -#else - ids[i] = tmpid; -#endif } rc = info->pi_nnis; diff --git a/lnet/lnet/lib-md.c b/lnet/lnet/lib-md.c index f730e24..8008c63 100644 --- a/lnet/lnet/lib-md.c +++ b/lnet/lnet/lib-md.c @@ -137,7 +137,7 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink) for (i = 0; i < (int)niov; i++) { /* We take the page pointer on trust */ if (lmd->md_iov.kiov[i].kiov_offset + - lmd->md_iov.kiov[i].kiov_len > CFS_PAGE_SIZE ) + lmd->md_iov.kiov[i].kiov_len > PAGE_CACHE_SIZE) return -EINVAL; /* invalid length */ total_length += lmd->md_iov.kiov[i].kiov_len; diff --git a/lnet/lnet/lib-move.c b/lnet/lnet/lib-move.c index 5240aac..a531d94 100644 --- a/lnet/lnet/lib-move.c +++ b/lnet/lnet/lib-move.c @@ -368,47 +368,47 @@ lnet_copy_kiov2kiov (unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset siov->kiov_len - soffset); this_nob = MIN(this_nob, nob); - if (daddr == NULL) - daddr = ((char *)cfs_kmap(diov->kiov_page)) + - 
diov->kiov_offset + doffset; - if (saddr == NULL) - saddr = ((char *)cfs_kmap(siov->kiov_page)) + - siov->kiov_offset + soffset; - - /* Vanishing risk of kmap deadlock when mapping 2 pages. - * However in practice at least one of the kiovs will be mapped - * kernel pages and the map/unmap will be NOOPs */ - - memcpy (daddr, saddr, this_nob); - nob -= this_nob; - - if (diov->kiov_len > doffset + this_nob) { - daddr += this_nob; - doffset += this_nob; - } else { - cfs_kunmap(diov->kiov_page); - daddr = NULL; - diov++; - ndiov--; - doffset = 0; - } + if (daddr == NULL) + daddr = ((char *)kmap(diov->kiov_page)) + + diov->kiov_offset + doffset; + if (saddr == NULL) + saddr = ((char *)kmap(siov->kiov_page)) + + siov->kiov_offset + soffset; + + /* Vanishing risk of kmap deadlock when mapping 2 pages. + * However in practice at least one of the kiovs will be mapped + * kernel pages and the map/unmap will be NOOPs */ + + memcpy (daddr, saddr, this_nob); + nob -= this_nob; + + if (diov->kiov_len > doffset + this_nob) { + daddr += this_nob; + doffset += this_nob; + } else { + kunmap(diov->kiov_page); + daddr = NULL; + diov++; + ndiov--; + doffset = 0; + } - if (siov->kiov_len > soffset + this_nob) { - saddr += this_nob; - soffset += this_nob; - } else { - cfs_kunmap(siov->kiov_page); - saddr = NULL; - siov++; - nsiov--; - soffset = 0; - } - } while (nob > 0); + if (siov->kiov_len > soffset + this_nob) { + saddr += this_nob; + soffset += this_nob; + } else { + kunmap(siov->kiov_page); + saddr = NULL; + siov++; + nsiov--; + soffset = 0; + } + } while (nob > 0); - if (daddr != NULL) - cfs_kunmap(diov->kiov_page); - if (saddr != NULL) - cfs_kunmap(siov->kiov_page); + if (daddr != NULL) + kunmap(diov->kiov_page); + if (saddr != NULL) + kunmap(siov->kiov_page); } EXPORT_SYMBOL(lnet_copy_kiov2kiov); @@ -450,7 +450,7 @@ lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset this_nob = MIN(this_nob, nob); if (addr == NULL) - addr = ((char *)cfs_kmap(kiov->kiov_page)) + + addr = ((char *)kmap(kiov->kiov_page)) + kiov->kiov_offset + kiovoffset; memcpy ((char *)iov->iov_base + iovoffset, addr, this_nob); @@ -468,17 +468,17 @@ lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset addr += this_nob; kiovoffset += this_nob; } else { - cfs_kunmap(kiov->kiov_page); - addr = NULL; - kiov++; - nkiov--; - kiovoffset = 0; - } + kunmap(kiov->kiov_page); + addr = NULL; + kiov++; + nkiov--; + kiovoffset = 0; + } - } while (nob > 0); + } while (nob > 0); - if (addr != NULL) - cfs_kunmap(kiov->kiov_page); + if (addr != NULL) + kunmap(kiov->kiov_page); } EXPORT_SYMBOL(lnet_copy_kiov2iov); @@ -520,34 +520,34 @@ lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffs this_nob = MIN(this_nob, nob); if (addr == NULL) - addr = ((char *)cfs_kmap(kiov->kiov_page)) + - kiov->kiov_offset + kiovoffset; + addr = ((char *)kmap(kiov->kiov_page)) + + kiov->kiov_offset + kiovoffset; - memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob); - nob -= this_nob; + memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob); + nob -= this_nob; - if (kiov->kiov_len > kiovoffset + this_nob) { - addr += this_nob; - kiovoffset += this_nob; - } else { - cfs_kunmap(kiov->kiov_page); - addr = NULL; - kiov++; - nkiov--; - kiovoffset = 0; - } + if (kiov->kiov_len > kiovoffset + this_nob) { + addr += this_nob; + kiovoffset += this_nob; + } else { + kunmap(kiov->kiov_page); + addr = NULL; + kiov++; + nkiov--; + kiovoffset = 0; + } - if (iov->iov_len > iovoffset + this_nob) { - 
iovoffset += this_nob; - } else { - iov++; - niov--; - iovoffset = 0; - } - } while (nob > 0); + if (iov->iov_len > iovoffset + this_nob) { + iovoffset += this_nob; + } else { + iov++; + niov--; + iovoffset = 0; + } + } while (nob > 0); - if (addr != NULL) - cfs_kunmap(kiov->kiov_page); + if (addr != NULL) + kunmap(kiov->kiov_page); } EXPORT_SYMBOL(lnet_copy_iov2kiov); @@ -582,14 +582,14 @@ lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst, dst->kiov_page = src->kiov_page; dst->kiov_offset = src->kiov_offset + offset; - if (len <= frag_len) { - dst->kiov_len = len; - LASSERT (dst->kiov_offset + dst->kiov_len <= CFS_PAGE_SIZE); - return (niov); - } + if (len <= frag_len) { + dst->kiov_len = len; + LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE); + return niov; + } - dst->kiov_len = frag_len; - LASSERT (dst->kiov_offset + dst->kiov_len <= CFS_PAGE_SIZE); + dst->kiov_len = frag_len; + LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE); len -= frag_len; dst++; @@ -907,7 +907,7 @@ lnet_msg2bufpool(lnet_msg_t *msg) rbp = &the_lnet.ln_rtrpools[cpt][0]; LASSERT(msg->msg_len <= LNET_MTU); - while (msg->msg_len > (unsigned int)rbp->rbp_npages * CFS_PAGE_SIZE) { + while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) { rbp++; LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]); } @@ -2130,7 +2130,7 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack, libcfs_id2str(target)); return -ENOMEM; } - msg->msg_vmflush = !!cfs_memory_pressure_get(); + msg->msg_vmflush = !!memory_pressure_get(); cpt = lnet_cpt_of_cookie(mdh.cookie); lnet_res_lock(cpt); diff --git a/lnet/lnet/router.c b/lnet/lnet/router.c index 4cef9e8..dd5e9e3 100644 --- a/lnet/lnet/router.c +++ b/lnet/lnet/router.c @@ -1296,7 +1296,7 @@ lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages) int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]); while (--npages >= 0) - cfs_free_page(rb->rb_kiov[npages].kiov_page); + __free_page(rb->rb_kiov[npages].kiov_page); LIBCFS_FREE(rb, sz); } @@ -1318,16 +1318,16 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt) for (i = 0; i < npages; i++) { page = cfs_page_cpt_alloc(lnet_cpt_table(), cpt, - CFS_ALLOC_ZERO | CFS_ALLOC_STD); + __GFP_ZERO | GFP_IOFS); if (page == NULL) { while (--i >= 0) - cfs_free_page(rb->rb_kiov[i].kiov_page); + __free_page(rb->rb_kiov[i].kiov_page); LIBCFS_FREE(rb, sz); return NULL; } - rb->rb_kiov[i].kiov_len = CFS_PAGE_SIZE; + rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE; rb->rb_kiov[i].kiov_offset = 0; rb->rb_kiov[i].kiov_page = page; } @@ -1489,7 +1489,7 @@ int lnet_rtrpools_alloc(int im_a_router) { lnet_rtrbufpool_t *rtrp; - int large_pages = (LNET_MTU + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT; + int large_pages = (LNET_MTU + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; int small_pages = 1; int nrb_tiny; int nrb_small; diff --git a/lnet/lnet/router_proc.c b/lnet/lnet/router_proc.c index d07ba78..d4382b0 100644 --- a/lnet/lnet/router_proc.c +++ b/lnet/lnet/router_proc.c @@ -261,7 +261,7 @@ int LL_PROC_PROTO(proc_lnet_routes) if (len > *lenp) { /* linux-supplied buffer is too small */ rc = -EINVAL; } else if (len > 0) { /* wrote something */ - if (cfs_copy_to_user(buffer, tmpstr, len)) + if (copy_to_user(buffer, tmpstr, len)) rc = -EFAULT; else { off += 1; @@ -397,7 +397,7 @@ int LL_PROC_PROTO(proc_lnet_routers) if (len > *lenp) { /* linux-supplied buffer is too small */ rc = -EINVAL; } else if (len > 0) { /* wrote something */ - if (cfs_copy_to_user(buffer, tmpstr, len)) + if (copy_to_user(buffer, tmpstr, len)) rc = -EFAULT; else { 
off += 1; @@ -565,7 +565,7 @@ int LL_PROC_PROTO(proc_lnet_peers) if (len > *lenp) { /* linux-supplied buffer is too small */ rc = -EINVAL; } else if (len > 0) { /* wrote something */ - if (cfs_copy_to_user(buffer, tmpstr, len)) + if (copy_to_user(buffer, tmpstr, len)) rc = -EFAULT; else *ppos = LNET_PROC_POS_MAKE(cpt, ver, hash, hoff); @@ -745,7 +745,7 @@ int LL_PROC_PROTO(proc_lnet_nis) if (len > *lenp) { /* linux-supplied buffer is too small */ rc = -EINVAL; } else if (len > 0) { /* wrote something */ - if (cfs_copy_to_user(buffer, tmpstr, len)) + if (copy_to_user(buffer, tmpstr, len)) rc = -EFAULT; else *ppos += 1; diff --git a/lnet/selftest/brw_test.c b/lnet/selftest/brw_test.c index e8b94e7..f905c0c 100644 --- a/lnet/selftest/brw_test.c +++ b/lnet/selftest/brw_test.c @@ -87,7 +87,7 @@ brw_client_init (sfw_test_instance_t *tsi) npg = breq->blk_npg; /* NB: this is not going to work for variable page size, * but we have to keep it for compatibility */ - len = npg * CFS_PAGE_SIZE; + len = npg * PAGE_CACHE_SIZE; } else { test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; @@ -99,7 +99,7 @@ brw_client_init (sfw_test_instance_t *tsi) opc = breq->blk_opc; flags = breq->blk_flags; len = breq->blk_len; - npg = (len + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT; + npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; } if (npg > LNET_MAX_IOV || npg <= 0) @@ -150,9 +150,9 @@ brw_inject_one_error (void) } void -brw_fill_page (cfs_page_t *pg, int pattern, __u64 magic) +brw_fill_page (struct page *pg, int pattern, __u64 magic) { - char *addr = cfs_page_address(pg); + char *addr = page_address(pg); int i; LASSERT (addr != NULL); @@ -164,13 +164,13 @@ brw_fill_page (cfs_page_t *pg, int pattern, __u64 magic) if (pattern == LST_BRW_CHECK_SIMPLE) { memcpy(addr, &magic, BRW_MSIZE); - addr += CFS_PAGE_SIZE - BRW_MSIZE; + addr += PAGE_CACHE_SIZE - BRW_MSIZE; memcpy(addr, &magic, BRW_MSIZE); return; } if (pattern == LST_BRW_CHECK_FULL) { - for (i = 0; i < CFS_PAGE_SIZE / BRW_MSIZE; i++) + for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE); return; } @@ -180,9 +180,9 @@ brw_fill_page (cfs_page_t *pg, int pattern, __u64 magic) } int -brw_check_page (cfs_page_t *pg, int pattern, __u64 magic) +brw_check_page (struct page *pg, int pattern, __u64 magic) { - char *addr = cfs_page_address(pg); + char *addr = page_address(pg); __u64 data = 0; /* make compiler happy */ int i; @@ -195,7 +195,7 @@ brw_check_page (cfs_page_t *pg, int pattern, __u64 magic) data = *((__u64 *) addr); if (data != magic) goto bad_data; - addr += CFS_PAGE_SIZE - BRW_MSIZE; + addr += PAGE_CACHE_SIZE - BRW_MSIZE; data = *((__u64 *) addr); if (data != magic) goto bad_data; @@ -203,7 +203,7 @@ brw_check_page (cfs_page_t *pg, int pattern, __u64 magic) } if (pattern == LST_BRW_CHECK_FULL) { - for (i = 0; i < CFS_PAGE_SIZE / BRW_MSIZE; i++) { + for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) { data = *(((__u64 *) addr) + i); if (data != magic) goto bad_data; } @@ -223,7 +223,7 @@ void brw_fill_bulk (srpc_bulk_t *bk, int pattern, __u64 magic) { int i; - cfs_page_t *pg; + struct page *pg; for (i = 0; i < bk->bk_niov; i++) { #ifdef __KERNEL__ @@ -240,7 +240,7 @@ int brw_check_bulk (srpc_bulk_t *bk, int pattern, __u64 magic) { int i; - cfs_page_t *pg; + struct page *pg; for (i = 0; i < bk->bk_niov; i++) { #ifdef __KERNEL__ @@ -283,7 +283,7 @@ brw_client_prep_rpc (sfw_test_unit_t *tsu, opc = breq->blk_opc; flags = breq->blk_flags; npg = breq->blk_npg; - len = npg * CFS_PAGE_SIZE; + len = npg * PAGE_CACHE_SIZE; } else 
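[Editor's note: the selftest hunks around here replace CFS_PAGE_SIZE/CFS_PAGE_SHIFT with PAGE_CACHE_SIZE/PAGE_CACHE_SHIFT. The recurring round-up idiom they preserve is worth spelling out once; a sketch, with the helper name being illustrative:

    #include <linux/pagemap.h>

    /* Round a byte count up to a whole number of pages:
     * (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT.
     * Note brw_client_init() above additionally rejects npg <= 0
     * and npg > LNET_MAX_IOV. */
    static inline int lst_bytes_to_pages(unsigned int len)
    {
            return (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
    }
]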
{ test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; @@ -295,7 +295,7 @@ brw_client_prep_rpc (sfw_test_unit_t *tsu, opc = breq->blk_opc; flags = breq->blk_flags; len = breq->blk_len; - npg = (len + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT; + npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; } rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc); @@ -470,10 +470,10 @@ brw_server_handle(struct srpc_server_rpc *rpc) reply->brw_status = EINVAL; return 0; } - npg = reqst->brw_len >> CFS_PAGE_SHIFT; + npg = reqst->brw_len >> PAGE_CACHE_SHIFT; } else { - npg = (reqst->brw_len + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT; + npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; } replymsg->msg_ses_feats = reqstmsg->msg_ses_feats; diff --git a/lnet/selftest/conctl.c b/lnet/selftest/conctl.c index 61c7242..68297bd 100644 --- a/lnet/selftest/conctl.c +++ b/lnet/selftest/conctl.c @@ -63,9 +63,8 @@ lst_session_new_ioctl(lstio_session_new_args_t *args) if (name == NULL) return -ENOMEM; - if (cfs_copy_from_user(name, - args->lstio_ses_namep, - args->lstio_ses_nmlen)) { + if (copy_from_user(name, args->lstio_ses_namep, + args->lstio_ses_nmlen)) { LIBCFS_FREE(name, args->lstio_ses_nmlen + 1); return -EFAULT; } @@ -137,7 +136,7 @@ lst_debug_ioctl(lstio_debug_args_t *args) if (name == NULL) return -ENOMEM; - if (cfs_copy_from_user(name, args->lstio_dbg_namep, + if (copy_from_user(name, args->lstio_dbg_namep, args->lstio_dbg_nmlen)) { LIBCFS_FREE(name, args->lstio_dbg_nmlen + 1); @@ -213,9 +212,8 @@ lst_group_add_ioctl(lstio_group_add_args_t *args) if (name == NULL) return -ENOMEM; - if (cfs_copy_from_user(name, - args->lstio_grp_namep, - args->lstio_grp_nmlen)) { + if (copy_from_user(name, args->lstio_grp_namep, + args->lstio_grp_nmlen)) { LIBCFS_FREE(name, args->lstio_grp_nmlen); return -EFAULT; } @@ -247,9 +245,8 @@ lst_group_del_ioctl(lstio_group_del_args_t *args) if (name == NULL) return -ENOMEM; - if (cfs_copy_from_user(name, - args->lstio_grp_namep, - args->lstio_grp_nmlen)) { + if (copy_from_user(name, args->lstio_grp_namep, + args->lstio_grp_nmlen)) { LIBCFS_FREE(name, args->lstio_grp_nmlen + 1); return -EFAULT; } @@ -282,9 +279,8 @@ lst_group_update_ioctl(lstio_group_update_args_t *args) if (name == NULL) return -ENOMEM; - if (cfs_copy_from_user(name, - args->lstio_grp_namep, - args->lstio_grp_nmlen)) { + if (copy_from_user(name, args->lstio_grp_namep, + args->lstio_grp_nmlen)) { LIBCFS_FREE(name, args->lstio_grp_nmlen + 1); return -EFAULT; } @@ -344,7 +340,7 @@ lst_nodes_add_ioctl(lstio_group_nodes_args_t *args) if (name == NULL) return -ENOMEM; - if (cfs_copy_from_user(name, args->lstio_grp_namep, + if (copy_from_user(name, args->lstio_grp_namep, args->lstio_grp_nmlen)) { LIBCFS_FREE(name, args->lstio_grp_nmlen + 1); @@ -359,7 +355,7 @@ lst_nodes_add_ioctl(lstio_group_nodes_args_t *args) LIBCFS_FREE(name, args->lstio_grp_nmlen + 1); if (rc == 0 && - cfs_copy_to_user(args->lstio_grp_featp, &feats, sizeof(feats))) { + copy_to_user(args->lstio_grp_featp, &feats, sizeof(feats))) { return -EINVAL; } @@ -408,23 +404,22 @@ lst_group_info_ioctl(lstio_group_info_args_t *args) args->lstio_grp_ndentp == NULL) /* # of node entry */ return -EINVAL; - if (cfs_copy_from_user(&ndent, args->lstio_grp_ndentp, - sizeof(ndent)) || - cfs_copy_from_user(&index, args->lstio_grp_idxp, - sizeof(index))) - return -EFAULT; + if (copy_from_user(&ndent, args->lstio_grp_ndentp, + sizeof(ndent)) || + copy_from_user(&index, args->lstio_grp_idxp, + sizeof(index))) + return -EFAULT; - if (ndent <= 0 || index < 
0) - return -EINVAL; - } + if (ndent <= 0 || index < 0) + return -EINVAL; + } - LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1); - if (name == NULL) - return -ENOMEM; + LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1); + if (name == NULL) + return -ENOMEM; - if (cfs_copy_from_user(name, - args->lstio_grp_namep, - args->lstio_grp_nmlen)) { + if (copy_from_user(name, args->lstio_grp_namep, + args->lstio_grp_nmlen)) { LIBCFS_FREE(name, args->lstio_grp_nmlen + 1); return -EFAULT; } @@ -439,12 +434,12 @@ lst_group_info_ioctl(lstio_group_info_args_t *args) if (rc != 0) return rc; - if (args->lstio_grp_dentsp != NULL && - (cfs_copy_to_user(args->lstio_grp_idxp, &index, sizeof(index)) || - cfs_copy_to_user(args->lstio_grp_ndentp, &ndent, sizeof(ndent)))) - rc = -EFAULT; + if (args->lstio_grp_dentsp != NULL && + (copy_to_user(args->lstio_grp_idxp, &index, sizeof(index)) || + copy_to_user(args->lstio_grp_ndentp, &ndent, sizeof(ndent)))) + rc = -EFAULT; - return 0; + return 0; } int @@ -465,12 +460,11 @@ lst_batch_add_ioctl(lstio_batch_add_args_t *args) if (name == NULL) return -ENOMEM; - if (cfs_copy_from_user(name, - args->lstio_bat_namep, - args->lstio_bat_nmlen)) { - LIBCFS_FREE(name, args->lstio_bat_nmlen + 1); - return -EFAULT; - } + if (copy_from_user(name, args->lstio_bat_namep, + args->lstio_bat_nmlen)) { + LIBCFS_FREE(name, args->lstio_bat_nmlen + 1); + return -EFAULT; + } name[args->lstio_bat_nmlen] = 0; @@ -499,12 +493,11 @@ lst_batch_run_ioctl(lstio_batch_run_args_t *args) if (name == NULL) return -ENOMEM; - if (cfs_copy_from_user(name, - args->lstio_bat_namep, - args->lstio_bat_nmlen)) { - LIBCFS_FREE(name, args->lstio_bat_nmlen + 1); - return -EFAULT; - } + if (copy_from_user(name, args->lstio_bat_namep, + args->lstio_bat_nmlen)) { + LIBCFS_FREE(name, args->lstio_bat_nmlen + 1); + return -EFAULT; + } name[args->lstio_bat_nmlen] = 0; @@ -535,12 +528,11 @@ lst_batch_stop_ioctl(lstio_batch_stop_args_t *args) if (name == NULL) return -ENOMEM; - if (cfs_copy_from_user(name, - args->lstio_bat_namep, - args->lstio_bat_nmlen)) { - LIBCFS_FREE(name, args->lstio_bat_nmlen + 1); - return -EFAULT; - } + if (copy_from_user(name, args->lstio_bat_namep, + args->lstio_bat_nmlen)) { + LIBCFS_FREE(name, args->lstio_bat_nmlen + 1); + return -EFAULT; + } name[args->lstio_bat_nmlen] = 0; @@ -574,12 +566,11 @@ lst_batch_query_ioctl(lstio_batch_query_args_t *args) if (name == NULL) return -ENOMEM; - if (cfs_copy_from_user(name, - args->lstio_bat_namep, - args->lstio_bat_nmlen)) { - LIBCFS_FREE(name, args->lstio_bat_nmlen + 1); - return -EFAULT; - } + if (copy_from_user(name, args->lstio_bat_namep, + args->lstio_bat_nmlen)) { + LIBCFS_FREE(name, args->lstio_bat_nmlen + 1); + return -EFAULT; + } name[args->lstio_bat_nmlen] = 0; @@ -636,9 +627,9 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args) args->lstio_bat_ndentp == NULL) /* # of node entry */ return -EINVAL; - if (cfs_copy_from_user(&index, args->lstio_bat_idxp, + if (copy_from_user(&index, args->lstio_bat_idxp, sizeof(index)) || - cfs_copy_from_user(&ndent, args->lstio_bat_ndentp, + copy_from_user(&ndent, args->lstio_bat_ndentp, sizeof(ndent))) return -EFAULT; @@ -650,8 +641,8 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args) if (name == NULL) return -ENOMEM; - if (cfs_copy_from_user(name, - args->lstio_bat_namep, args->lstio_bat_nmlen)) { + if (copy_from_user(name, args->lstio_bat_namep, + args->lstio_bat_nmlen)) { LIBCFS_FREE(name, args->lstio_bat_nmlen + 1); return -EFAULT; } @@ -668,12 +659,12 @@ lst_batch_info_ioctl(lstio_batch_info_args_t 
*args) if (rc != 0) return rc; - if (args->lstio_bat_dentsp != NULL && - (cfs_copy_to_user(args->lstio_bat_idxp, &index, sizeof(index)) || - cfs_copy_to_user(args->lstio_bat_ndentp, &ndent, sizeof(ndent)))) - rc = -EFAULT; + if (args->lstio_bat_dentsp != NULL && + (copy_to_user(args->lstio_bat_idxp, &index, sizeof(index)) || + copy_to_user(args->lstio_bat_ndentp, &ndent, sizeof(ndent)))) + rc = -EFAULT; - return rc; + return rc; } int @@ -701,7 +692,7 @@ lst_stat_query_ioctl(lstio_stat_args_t *args) if (name == NULL) return -ENOMEM; - if (cfs_copy_from_user(name, args->lstio_sta_namep, + if (copy_from_user(name, args->lstio_sta_namep, args->lstio_sta_nmlen)) { LIBCFS_FREE(name, args->lstio_sta_nmlen + 1); return -EFAULT; @@ -753,7 +744,8 @@ int lst_test_add_ioctl(lstio_test_args_t *args) /* have parameter, check if parameter length is valid */ if (args->lstio_tes_param != NULL && (args->lstio_tes_param_len <= 0 || - args->lstio_tes_param_len > CFS_PAGE_SIZE - sizeof(lstcon_test_t))) + args->lstio_tes_param_len > + PAGE_CACHE_SIZE - sizeof(lstcon_test_t))) return -EINVAL; LIBCFS_ALLOC(name, args->lstio_tes_bat_nmlen + 1); @@ -774,19 +766,16 @@ int lst_test_add_ioctl(lstio_test_args_t *args) goto out; } - rc = -EFAULT; - if (cfs_copy_from_user(name, - args->lstio_tes_bat_name, - args->lstio_tes_bat_nmlen) || - cfs_copy_from_user(srcgrp, - args->lstio_tes_sgrp_name, - args->lstio_tes_sgrp_nmlen) || - cfs_copy_from_user(dstgrp, - args->lstio_tes_dgrp_name, - args->lstio_tes_dgrp_nmlen) || - cfs_copy_from_user(param, args->lstio_tes_param, - args->lstio_tes_param_len)) - goto out; + rc = -EFAULT; + if (copy_from_user(name, args->lstio_tes_bat_name, + args->lstio_tes_bat_nmlen) || + copy_from_user(srcgrp, args->lstio_tes_sgrp_name, + args->lstio_tes_sgrp_nmlen) || + copy_from_user(dstgrp, args->lstio_tes_dgrp_name, + args->lstio_tes_dgrp_nmlen) || + copy_from_user(param, args->lstio_tes_param, + args->lstio_tes_param_len)) + goto out; rc = lstcon_test_add(name, args->lstio_tes_type, @@ -797,7 +786,7 @@ int lst_test_add_ioctl(lstio_test_args_t *args) &ret, args->lstio_tes_resultp); if (ret != 0) - rc = (cfs_copy_to_user(args->lstio_tes_retp, &ret, + rc = (copy_to_user(args->lstio_tes_retp, &ret, sizeof(ret))) ? 
-EFAULT : 0; out: if (name != NULL) @@ -818,25 +807,25 @@ out: int lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data) { - char *buf; - int opc = data->ioc_u32[0]; - int rc; + char *buf; + int opc = data->ioc_u32[0]; + int rc; - if (cmd != IOC_LIBCFS_LNETST) - return -EINVAL; + if (cmd != IOC_LIBCFS_LNETST) + return -EINVAL; - if (data->ioc_plen1 > CFS_PAGE_SIZE) - return -EINVAL; + if (data->ioc_plen1 > PAGE_CACHE_SIZE) + return -EINVAL; - LIBCFS_ALLOC(buf, data->ioc_plen1); - if (buf == NULL) - return -ENOMEM; + LIBCFS_ALLOC(buf, data->ioc_plen1); + if (buf == NULL) + return -ENOMEM; - /* copy in parameter */ - if (cfs_copy_from_user(buf, data->ioc_pbuf1, data->ioc_plen1)) { - LIBCFS_FREE(buf, data->ioc_plen1); - return -EFAULT; - } + /* copy in parameter */ + if (copy_from_user(buf, data->ioc_pbuf1, data->ioc_plen1)) { + LIBCFS_FREE(buf, data->ioc_plen1); + return -EFAULT; + } mutex_lock(&console_session.ses_mutex); @@ -918,15 +907,15 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data) rc = -EINVAL; } - if (cfs_copy_to_user(data->ioc_pbuf2, &console_session.ses_trans_stat, - sizeof(lstcon_trans_stat_t))) - rc = -EFAULT; + if (copy_to_user(data->ioc_pbuf2, &console_session.ses_trans_stat, + sizeof(lstcon_trans_stat_t))) + rc = -EFAULT; out: mutex_unlock(&console_session.ses_mutex); - LIBCFS_FREE(buf, data->ioc_plen1); + LIBCFS_FREE(buf, data->ioc_plen1); - return rc; + return rc; } EXPORT_SYMBOL(lstcon_ioctl_entry); diff --git a/lnet/selftest/conrpc.c b/lnet/selftest/conrpc.c index 665a9a5..856caf7 100644 --- a/lnet/selftest/conrpc.c +++ b/lnet/selftest/conrpc.c @@ -160,7 +160,7 @@ lstcon_rpc_put(lstcon_rpc_t *crpc) if (bulk->bk_iovs[i].kiov_page == NULL) continue; - cfs_free_page(bulk->bk_iovs[i].kiov_page); + __free_page(bulk->bk_iovs[i].kiov_page); } srpc_client_rpc_decref(crpc->crp_rpc); @@ -494,7 +494,7 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans, cfs_list_for_each_entry_typed(crpc, &trans->tas_rpcs_list, lstcon_rpc_t, crp_link) { - if (cfs_copy_from_user(&tmp, next, + if (copy_from_user(&tmp, next, sizeof(cfs_list_t))) return -EFAULT; @@ -515,35 +515,36 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans, (cfs_time_t)console_session.ses_id.ses_stamp); cfs_duration_usec(dur, &tv); - if (cfs_copy_to_user(&ent->rpe_peer, - &nd->nd_id, sizeof(lnet_process_id_t)) || - cfs_copy_to_user(&ent->rpe_stamp, &tv, sizeof(tv)) || - cfs_copy_to_user(&ent->rpe_state, - &nd->nd_state, sizeof(nd->nd_state)) || - cfs_copy_to_user(&ent->rpe_rpc_errno, &error, - sizeof(error))) - return -EFAULT; + if (copy_to_user(&ent->rpe_peer, + &nd->nd_id, sizeof(lnet_process_id_t)) || + copy_to_user(&ent->rpe_stamp, &tv, sizeof(tv)) || + copy_to_user(&ent->rpe_state, + &nd->nd_state, sizeof(nd->nd_state)) || + copy_to_user(&ent->rpe_rpc_errno, &error, + sizeof(error))) + return -EFAULT; - if (error != 0) - continue; + if (error != 0) + continue; - /* RPC is done */ - rep = (srpc_generic_reply_t *)&msg->msg_body.reply; + /* RPC is done */ + rep = (srpc_generic_reply_t *)&msg->msg_body.reply; - if (cfs_copy_to_user(&ent->rpe_sid, - &rep->sid, sizeof(lst_sid_t)) || - cfs_copy_to_user(&ent->rpe_fwk_errno, - &rep->status, sizeof(rep->status))) - return -EFAULT; + if (copy_to_user(&ent->rpe_sid, + &rep->sid, sizeof(lst_sid_t)) || + copy_to_user(&ent->rpe_fwk_errno, + &rep->status, sizeof(rep->status))) + return -EFAULT; - if (readent == NULL) - continue; + if (readent == NULL) + continue; - if ((error = readent(trans->tas_opc, msg, ent)) != 0) - return error; - } 
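[Editor's note: throughout the conctl.c/conrpc.c hunks the cfs_copy_from_user/cfs_copy_to_user wrappers are dropped in favour of the kernel primitives. Both return the number of bytes that could not be copied, which is why every call site above treats any nonzero return as -EFAULT. A minimal sketch of the idiom; "lst_args_t" and the helper are hypothetical, not from this patch:

    #include <linux/uaccess.h>

    typedef struct {
            int a_val;
    } lst_args_t;

    static int lst_fetch_args(lst_args_t *dst, const void __user *src)
    {
            /* nonzero return means some bytes faulted: report -EFAULT */
            if (copy_from_user(dst, src, sizeof(*dst)))
                    return -EFAULT;
            return 0;
    }
]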
+ error = readent(trans->tas_opc, msg, ent); + if (error != 0) + return error; + } - return 0; + return 0; } void @@ -720,7 +721,7 @@ lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov) LASSERT (i < nkiov); - pid = (lnet_process_id_packed_t *)cfs_page_address(kiov[i].kiov_page); + pid = (lnet_process_id_packed_t *)page_address(kiov[i].kiov_page); return &pid[idx % SFW_ID_PER_PAGE]; } @@ -797,11 +798,12 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req) { test_bulk_req_t *brq = &req->tsr_u.bulk_v0; - brq->blk_opc = param->blk_opc; - brq->blk_npg = (param->blk_size + CFS_PAGE_SIZE - 1) / CFS_PAGE_SIZE; - brq->blk_flags = param->blk_flags; + brq->blk_opc = param->blk_opc; + brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) / + PAGE_CACHE_SIZE; + brq->blk_flags = param->blk_flags; - return 0; + return 0; } int @@ -833,7 +835,7 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, if (transop == LST_TRANS_TSBCLIADD) { npg = sfw_id_pages(test->tes_span); nob = (feats & LST_FEAT_BULK_LEN) == 0 ? - npg * CFS_PAGE_SIZE : + npg * PAGE_CACHE_SIZE : sizeof(lnet_process_id_packed_t) * test->tes_span; } @@ -860,13 +862,13 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, LASSERT(nob > 0); len = (feats & LST_FEAT_BULK_LEN) == 0 ? - CFS_PAGE_SIZE : min_t(int, nob, CFS_PAGE_SIZE); + PAGE_CACHE_SIZE : min_t(int, nob, PAGE_CACHE_SIZE); nob -= len; bulk->bk_iovs[i].kiov_offset = 0; bulk->bk_iovs[i].kiov_len = len; bulk->bk_iovs[i].kiov_page = - cfs_alloc_page(CFS_ALLOC_STD); + alloc_page(GFP_IOFS); if (bulk->bk_iovs[i].kiov_page == NULL) { lstcon_rpc_put(*crpc); diff --git a/lnet/selftest/console.c b/lnet/selftest/console.c index 04c1a38..9689506 100644 --- a/lnet/selftest/console.c +++ b/lnet/selftest/console.c @@ -392,9 +392,9 @@ lstcon_sesrpc_readent(int transop, srpc_msg_t *msg, case LST_TRANS_SESQRY: rep = &msg->msg_body.dbg_reply; - if (cfs_copy_to_user(&ent_up->rpe_priv[0], + if (copy_to_user(&ent_up->rpe_priv[0], &rep->dbg_timeout, sizeof(int)) || - cfs_copy_to_user(&ent_up->rpe_payload[0], + copy_to_user(&ent_up->rpe_payload[0], &rep->dbg_name, LST_NAME_SIZE)) return -EFAULT; @@ -426,7 +426,7 @@ lstcon_group_nodes_add(lstcon_group_t *grp, } for (i = 0 ; i < count; i++) { - if (cfs_copy_from_user(&id, &ids_up[i], sizeof(id))) { + if (copy_from_user(&id, &ids_up[i], sizeof(id))) { rc = -EFAULT; break; } @@ -495,7 +495,7 @@ lstcon_group_nodes_remove(lstcon_group_t *grp, } for (i = 0; i < count; i++) { - if (cfs_copy_from_user(&id, &ids_up[i], sizeof(id))) { + if (copy_from_user(&id, &ids_up[i], sizeof(id))) { rc = -EFAULT; goto error; } @@ -740,7 +740,7 @@ lstcon_group_list(int index, int len, char *name_up) cfs_list_for_each_entry_typed(grp, &console_session.ses_grp_list, lstcon_group_t, grp_link) { if (index-- == 0) { - return cfs_copy_to_user(name_up, grp->grp_name, len) ? + return copy_to_user(name_up, grp->grp_name, len) ? 
-EFAULT : 0; } } @@ -770,9 +770,9 @@ lstcon_nodes_getent(cfs_list_t *head, int *index_p, break; nd = ndl->ndl_node; - if (cfs_copy_to_user(&dents_up[count].nde_id, + if (copy_to_user(&dents_up[count].nde_id, &nd->nd_id, sizeof(nd->nd_id)) || - cfs_copy_to_user(&dents_up[count].nde_state, + copy_to_user(&dents_up[count].nde_state, &nd->nd_state, sizeof(nd->nd_state))) return -EFAULT; @@ -827,7 +827,7 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t *gents_p, lstcon_ndlink_t, ndl_link) LST_NODE_STATE_COUNTER(ndl->ndl_node, gentp); - rc = cfs_copy_to_user(gents_p, gentp, + rc = copy_to_user(gents_p, gentp, sizeof(lstcon_ndlist_ent_t)) ? -EFAULT: 0; LIBCFS_FREE(gentp, sizeof(lstcon_ndlist_ent_t)); @@ -924,7 +924,7 @@ lstcon_batch_list(int index, int len, char *name_up) cfs_list_for_each_entry_typed(bat, &console_session.ses_bat_list, lstcon_batch_t, bat_link) { if (index-- == 0) { - return cfs_copy_to_user(name_up,bat->bat_name, len) ? + return copy_to_user(name_up, bat->bat_name, len) ? -EFAULT: 0; } } @@ -1000,7 +1000,7 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t *ent_up, int server, cfs_list_for_each_entry_typed(ndl, srvlst, lstcon_ndlink_t, ndl_link) LST_NODE_STATE_COUNTER(ndl->ndl_node, &entp->tbe_srv_nle); - rc = cfs_copy_to_user(ent_up, entp, + rc = copy_to_user(ent_up, entp, sizeof(lstcon_test_batch_ent_t)) ? -EFAULT : 0; LIBCFS_FREE(entp, sizeof(lstcon_test_batch_ent_t)); @@ -1369,7 +1369,7 @@ lstcon_tsbrpc_readent(int transop, srpc_msg_t *msg, transop == LST_TRANS_TSBSRVQRY); /* positive errno, framework error code */ - if (cfs_copy_to_user(&ent_up->rpe_priv[0], + if (copy_to_user(&ent_up->rpe_priv[0], &rep->bar_active, sizeof(rep->bar_active))) return -EFAULT; @@ -1454,9 +1454,9 @@ lstcon_statrpc_readent(int transop, srpc_msg_t *msg, srpc_stat = (srpc_counters_t *)((char *)sfwk_stat + sizeof(*sfwk_stat)); lnet_stat = (lnet_counters_t *)((char *)srpc_stat + sizeof(*srpc_stat)); - if (cfs_copy_to_user(sfwk_stat, &rep->str_fw, sizeof(*sfwk_stat)) || - cfs_copy_to_user(srpc_stat, &rep->str_rpc, sizeof(*srpc_stat)) || - cfs_copy_to_user(lnet_stat, &rep->str_lnet, sizeof(*lnet_stat))) + if (copy_to_user(sfwk_stat, &rep->str_fw, sizeof(*sfwk_stat)) || + copy_to_user(srpc_stat, &rep->str_rpc, sizeof(*srpc_stat)) || + copy_to_user(lnet_stat, &rep->str_lnet, sizeof(*lnet_stat))) return -EFAULT; return 0; @@ -1524,7 +1524,7 @@ lstcon_nodes_stat(int count, lnet_process_id_t *ids_up, } for (i = 0 ; i < count; i++) { - if (cfs_copy_from_user(&id, &ids_up[i], sizeof(id))) { + if (copy_from_user(&id, &ids_up[i], sizeof(id))) { rc = -EFAULT; break; } @@ -1636,7 +1636,7 @@ lstcon_nodes_debug(int timeout, } for (i = 0; i < count; i++) { - if (cfs_copy_from_user(&id, &ids_up[i], sizeof(id))) { + if (copy_from_user(&id, &ids_up[i], sizeof(id))) { rc = -EFAULT; break; } @@ -1739,7 +1739,7 @@ lstcon_session_new(char *name, int key, unsigned feats, return rc; } - if (cfs_copy_to_user(sid_up, &console_session.ses_id, + if (copy_to_user(sid_up, &console_session.ses_id, sizeof(lst_sid_t)) == 0) return rc; @@ -1769,14 +1769,14 @@ lstcon_session_info(lst_sid_t *sid_up, int *key_up, unsigned *featp, lstcon_ndlink_t, ndl_link) LST_NODE_STATE_COUNTER(ndl->ndl_node, entp); - if (cfs_copy_to_user(sid_up, &console_session.ses_id, + if (copy_to_user(sid_up, &console_session.ses_id, sizeof(lst_sid_t)) || - cfs_copy_to_user(key_up, &console_session.ses_key, + copy_to_user(key_up, &console_session.ses_key, sizeof(*key_up)) || - cfs_copy_to_user(featp, &console_session.ses_features, + copy_to_user(featp, 
&console_session.ses_features, sizeof(*featp)) || - cfs_copy_to_user(ndinfo_up, entp, sizeof(*entp)) || - cfs_copy_to_user(name_up, console_session.ses_name, len)) + copy_to_user(ndinfo_up, entp, sizeof(*entp)) || + copy_to_user(name_up, console_session.ses_name, len)) rc = -EFAULT; LIBCFS_FREE(entp, sizeof(*entp)); diff --git a/lnet/selftest/framework.c b/lnet/selftest/framework.c index 506010e..305659e 100644 --- a/lnet/selftest/framework.c +++ b/lnet/selftest/framework.c @@ -814,10 +814,10 @@ sfw_add_test_instance (sfw_batch_t *tsb, srpc_server_rpc_t *rpc) int j; #ifdef __KERNEL__ - dests = cfs_page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page); - LASSERT (dests != NULL); /* my pages are within KVM always */ + dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page); + LASSERT (dests != NULL); /* my pages are within KVM always */ #else - dests = cfs_page_address(bk->bk_pages[i / SFW_ID_PER_PAGE]); + dests = page_address(bk->bk_pages[i / SFW_ID_PER_PAGE]); #endif id = dests[i % SFW_ID_PER_PAGE]; if (msg->msg_magic != SRPC_MSG_MAGIC) @@ -1202,7 +1202,7 @@ sfw_add_test (srpc_server_rpc_t *rpc) int len; if ((sn->sn_features & LST_FEAT_BULK_LEN) == 0) { - len = npg * CFS_PAGE_SIZE; + len = npg * PAGE_CACHE_SIZE; } else { len = sizeof(lnet_process_id_packed_t) * diff --git a/lnet/selftest/rpc.c b/lnet/selftest/rpc.c index b30301a..301a31a 100644 --- a/lnet/selftest/rpc.c +++ b/lnet/selftest/rpc.c @@ -88,9 +88,9 @@ void srpc_set_counters (const srpc_counters_t *cnt) } int -srpc_add_bulk_page(srpc_bulk_t *bk, cfs_page_t *pg, int i, int nob) +srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob) { - nob = min(nob, (int)CFS_PAGE_SIZE); + nob = min(nob, (int)PAGE_CACHE_SIZE); LASSERT(nob > 0); LASSERT(i >= 0 && i < bk->bk_niov); @@ -104,7 +104,7 @@ srpc_add_bulk_page(srpc_bulk_t *bk, cfs_page_t *pg, int i, int nob) bk->bk_pages[i] = pg; bk->bk_iovs[i].iov_len = nob; - bk->bk_iovs[i].iov_base = cfs_page_address(pg); + bk->bk_iovs[i].iov_base = page_address(pg); #endif return nob; } @@ -113,7 +113,7 @@ void srpc_free_bulk (srpc_bulk_t *bk) { int i; - cfs_page_t *pg; + struct page *pg; LASSERT (bk != NULL); #ifndef __KERNEL__ @@ -128,11 +128,11 @@ srpc_free_bulk (srpc_bulk_t *bk) #endif if (pg == NULL) break; - cfs_free_page(pg); + __free_page(pg); } #ifndef __KERNEL__ - LIBCFS_FREE(bk->bk_pages, sizeof(cfs_page_t *) * bk->bk_niov); + LIBCFS_FREE(bk->bk_pages, sizeof(struct page *) * bk->bk_niov); #endif LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov])); return; @@ -142,7 +142,7 @@ srpc_bulk_t * srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink) { srpc_bulk_t *bk; - cfs_page_t **pages; + struct page **pages; int i; LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV); @@ -160,24 +160,24 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink) bk->bk_niov = bulk_npg; #ifndef __KERNEL__ LIBCFS_CPT_ALLOC(pages, lnet_cpt_table(), cpt, - sizeof(cfs_page_t *) * bulk_npg); + sizeof(struct page *) * bulk_npg); if (pages == NULL) { LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bulk_npg])); CERROR("Can't allocate page array for %d pages\n", bulk_npg); return NULL; } - memset(pages, 0, sizeof(cfs_page_t *) * bulk_npg); + memset(pages, 0, sizeof(struct page *) * bulk_npg); bk->bk_pages = pages; #else UNUSED(pages); #endif for (i = 0; i < bulk_npg; i++) { - cfs_page_t *pg; + struct page *pg; int nob; - pg = cfs_page_cpt_alloc(lnet_cpt_table(), cpt, CFS_ALLOC_STD); + pg = cfs_page_cpt_alloc(lnet_cpt_table(), cpt, GFP_IOFS); if (pg == 
NULL) { CERROR("Can't allocate page %d of %d\n", i, bulk_npg); srpc_free_bulk(bk); diff --git a/lnet/selftest/selftest.h b/lnet/selftest/selftest.h index 52f002f..3fa44f4 100644 --- a/lnet/selftest/selftest.h +++ b/lnet/selftest/selftest.h @@ -170,7 +170,7 @@ typedef struct { #ifdef __KERNEL__ lnet_kiov_t bk_iovs[0]; #else - cfs_page_t **bk_pages; + struct page **bk_pages; lnet_md_iovec_t bk_iovs[0]; #endif } srpc_bulk_t; /* bulk descriptor */ @@ -399,10 +399,10 @@ typedef struct sfw_test_instance { } tsi_u; } sfw_test_instance_t; -/* XXX: trailing (CFS_PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at +/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at * the end of pages are not used */ #define SFW_MAX_CONCUR LST_MAX_CONCUR -#define SFW_ID_PER_PAGE (CFS_PAGE_SIZE / sizeof(lnet_process_id_packed_t)) +#define SFW_ID_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lnet_process_id_packed_t)) #define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE) #define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE) @@ -433,7 +433,7 @@ void sfw_post_rpc(srpc_client_rpc_t *rpc); void sfw_client_rpc_done(srpc_client_rpc_t *rpc); void sfw_unpack_message(srpc_msg_t *msg); void sfw_free_pages(srpc_server_rpc_t *rpc); -void sfw_add_bulk_page(srpc_bulk_t *bk, cfs_page_t *pg, int i); +void sfw_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i); int sfw_alloc_pages(srpc_server_rpc_t *rpc, int cpt, int npages, int len, int sink); int sfw_make_session (srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply); diff --git a/lnet/utils/lst.c b/lnet/utils/lst.c index 63db07d..34c3e8d 100644 --- a/lnet/utils/lst.c +++ b/lnet/utils/lst.c @@ -2933,7 +2933,7 @@ lst_get_bulk_param(int argc, char **argv, lst_test_bulk_param_t *bulk) else if (*end == 'm' || *end == 'M') bulk->blk_size *= 1024 * 1024; - if (bulk->blk_size > CFS_PAGE_SIZE * LNET_MAX_IOV) { + if (bulk->blk_size > PAGE_CACHE_SIZE * LNET_MAX_IOV) { fprintf(stderr, "Size exceed limitation: %d bytes\n", bulk->blk_size); return -1; diff --git a/lustre/fld/fld_cache.c b/lustre/fld/fld_cache.c index 2b6a4e4..2254cd3 100644 --- a/lustre/fld/fld_cache.c +++ b/lustre/fld/fld_cache.c @@ -277,7 +277,7 @@ static void fld_cache_punch_hole(struct fld_cache *cache, struct fld_cache_entry *fldt; ENTRY; - OBD_ALLOC_GFP(fldt, sizeof *fldt, CFS_ALLOC_ATOMIC); + OBD_ALLOC_GFP(fldt, sizeof *fldt, GFP_ATOMIC); if (!fldt) { OBD_FREE_PTR(f_new); EXIT; diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h index 73925eb..517ce27 100644 --- a/lustre/include/cl_object.h +++ b/lustre/include/cl_object.h @@ -322,7 +322,7 @@ struct cl_object_operations { * to be used instead of newly created. */ int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, cfs_page_t *vmpage); + struct cl_page *page, struct page *vmpage); /** * Initialize lock slice for this layer. Called top-to-bottom through * every object layer when a new cl_lock is instantiated. Layer @@ -481,7 +481,7 @@ struct cl_object_header { * corresponding radix tree at the corresponding logical offset. * * cl_page is associated with VM page of the hosting environment (struct - * page in Linux kernel, for example), cfs_page_t. It is assumed, that this + * page in Linux kernel, for example), struct page. It is assumed, that this * association is implemented by one of cl_page layers (top layer in the * current design) that * @@ -491,7 +491,7 @@ struct cl_object_header { * - translates state (page flag bits) and locking between lustre and * environment. 
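The cl_page/struct page association documented in this comment block follows the usual Linux pagecache convention (see the ->private discussion just below): stash a pointer in page->private, set PG_private, and hold an extra page reference for the lifetime of the binding. A minimal sketch under that assumption; my_attach()/my_lookup() are hypothetical helpers, not code from this patch:

#include <linux/mm.h>
#include <linux/pagemap.h>

static void my_attach(struct page *vmpage, void *slice)
{
        BUG_ON(!PageLocked(vmpage));
        page_cache_get(vmpage);         /* extra ref held by the binding */
        SetPagePrivate(vmpage);
        set_page_private(vmpage, (unsigned long)slice);
}

static void *my_lookup(struct page *vmpage)
{
        /* safe only under the vmpage lock, as cl_vmpage_page() notes */
        BUG_ON(!PageLocked(vmpage));
        return PagePrivate(vmpage) ? (void *)page_private(vmpage) : NULL;
}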
* - * The association between cl_page and cfs_page_t is immutable and + * The association between cl_page and struct page is immutable and * established when cl_page is created. * * cl_page can be "owned" by a particular cl_io (see below), guaranteeing @@ -500,7 +500,7 @@ struct cl_object_header { * eviction of the page from the memory). Note, that in general cl_io * cannot be identified with a particular thread, and page ownership is not * exactly equal to the current thread holding a lock on the page. Layer - * implementing association between cl_page and cfs_page_t has to implement + * implementing association between cl_page and struct page has to implement * ownership on top of available synchronization mechanisms. * * While lustre client maintains the notion of an page ownership by io, @@ -534,7 +534,7 @@ struct cl_object_header { * - by doing a lookup in the cl_object radix tree, protected by the * spin-lock; * - * - by starting from VM-locked cfs_page_t and following some + * - by starting from VM-locked struct page and following some * hosting environment method (e.g., following ->private pointer in * the case of Linux kernel), see cl_vmpage_page(); * @@ -561,7 +561,7 @@ struct cl_object_header { * * Linux Kernel implementation. * - * Binding between cl_page and cfs_page_t (which is a typedef for + * Binding between cl_page and struct page (which is a typedef for * struct page) is implemented in the vvp layer. cl_page is attached to the * ->private pointer of the struct page, together with the setting of * PG_private bit in page->flags, and acquiring additional reference on the @@ -710,7 +710,7 @@ enum cl_page_flags { }; /** - * Fields are protected by the lock on cfs_page_t, except for atomics and + * Fields are protected by the lock on struct page, except for atomics and * immutables. * * \invariant Data type invariants are in cl_page_invariant(). Basically: @@ -835,7 +835,7 @@ enum cl_req_type { */ struct cl_page_operations { /** - * cl_page<->cfs_page_t methods. Only one layer in the stack has to + * cl_page<->struct page methods. Only one layer in the stack has to * implement these. Current code assumes that this functionality is * provided by the topmost layer, see cl_page_disown0() as an example. */ @@ -843,7 +843,7 @@ struct cl_page_operations { /** * \return the underlying VM page. Optional. 
*/ - cfs_page_t *(*cpo_vmpage)(const struct lu_env *env, + struct page *(*cpo_vmpage)(const struct lu_env *env, const struct cl_page_slice *slice); /** * Called when \a io acquires this page into the exclusive @@ -2808,9 +2808,9 @@ void cl_page_print (const struct lu_env *env, void *cookie, void cl_page_header_print(const struct lu_env *env, void *cookie, lu_printer_t printer, const struct cl_page *pg); -cfs_page_t *cl_page_vmpage (const struct lu_env *env, +struct page *cl_page_vmpage (const struct lu_env *env, struct cl_page *page); -struct cl_page *cl_vmpage_page (cfs_page_t *vmpage, struct cl_object *obj); +struct cl_page *cl_vmpage_page (struct page *vmpage, struct cl_object *obj); struct cl_page *cl_page_top (struct cl_page *page); const struct cl_page_slice *cl_page_at(const struct cl_page *page, diff --git a/lustre/include/lclient.h b/lustre/include/lclient.h index ae8f0c7..a24460f 100644 --- a/lustre/include/lclient.h +++ b/lustre/include/lclient.h @@ -234,7 +234,7 @@ struct ccc_page { */ cfs_list_t cpg_pending_linkage; /** VM page */ - cfs_page_t *cpg_page; + struct page *cpg_page; }; static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice) @@ -242,7 +242,7 @@ static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice) return container_of(slice, struct ccc_page, cpg_cl); } -struct cl_page *ccc_vmpage_page_transient(cfs_page_t *vmpage); +struct cl_page *ccc_vmpage_page_transient(struct page *vmpage); struct ccc_device { struct cl_device cdv_cl; @@ -304,7 +304,7 @@ int ccc_object_glimpse(const struct lu_env *env, const struct cl_object *obj, struct ost_lvb *lvb); int ccc_conf_set(const struct lu_env *env, struct cl_object *obj, const struct cl_object_conf *conf); -cfs_page_t *ccc_page_vmpage(const struct lu_env *env, +struct page *ccc_page_vmpage(const struct lu_env *env, const struct cl_page_slice *slice); int ccc_page_is_under_lock(const struct lu_env *env, const struct cl_page_slice *slice, struct cl_io *io); @@ -373,14 +373,14 @@ struct ccc_lock *cl2ccc_lock (const struct cl_lock_slice *slice); struct ccc_io *cl2ccc_io (const struct lu_env *env, const struct cl_io_slice *slice); struct ccc_req *cl2ccc_req (const struct cl_req_slice *slice); -cfs_page_t *cl2vm_page (const struct cl_page_slice *slice); +struct page *cl2vm_page (const struct cl_page_slice *slice); struct inode *ccc_object_inode(const struct cl_object *obj); struct ccc_object *cl_inode2ccc (struct inode *inode); int cl_setattr_ost(struct inode *inode, const struct iattr *attr, struct obd_capa *capa); -struct cl_page *ccc_vmpage_page_transient(cfs_page_t *vmpage); +struct cl_page *ccc_vmpage_page_transient(struct page *vmpage); int ccc_object_invariant(const struct cl_object *obj); int cl_file_inode_init(struct inode *inode, struct lustre_md *md); void cl_inode_fini(struct inode *inode); diff --git a/lustre/include/liblustre.h b/lustre/include/liblustre.h index ad88587..cf41790 100644 --- a/lustre/include/liblustre.h +++ b/lustre/include/liblustre.h @@ -163,8 +163,8 @@ void cfs_get_random_bytes(void *ptr, int size); /* memory */ /* memory size: used for some client tunables */ -#define cfs_num_physpages (256 * 1024) /* 1GB */ -#define CFS_NUM_CACHEPAGES cfs_num_physpages +#define num_physpages (256 * 1024) /* 1GB */ +#define NUM_CACHEPAGES num_physpages /* VFS stuff */ diff --git a/lustre/include/lu_object.h b/lustre/include/lu_object.h index 5a7551f..0d78047 100644 --- a/lustre/include/lu_object.h +++ b/lustre/include/lu_object.h @@ -1144,7 +1144,7 @@ struct lu_context_key { { 
\ type *value; \ \ - CLASSERT(CFS_PAGE_SIZE >= sizeof (*value)); \ + CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value)); \ \ OBD_ALLOC_PTR(value); \ if (value == NULL) \ @@ -1331,7 +1331,7 @@ int lu_global_init(void); void lu_global_fini(void); struct lu_kmem_descr { - cfs_mem_cache_t **ckd_cache; + struct kmem_cache **ckd_cache; const char *ckd_name; const size_t ckd_size; }; diff --git a/lustre/include/lustre/lustre_idl.h b/lustre/include/lustre/lustre_idl.h index f2900b4..ee5406e 100644 --- a/lustre/include/lustre/lustre_idl.h +++ b/lustre/include/lustre/lustre_idl.h @@ -1087,16 +1087,16 @@ static inline int lu_dirent_size(struct lu_dirent *ent) * MDS_READPAGE page size * * This is the directory page size packed in MDS_READPAGE RPC. - * It's different than CFS_PAGE_SIZE because the client needs to + * It's different than PAGE_CACHE_SIZE because the client needs to * access the struct lu_dirpage header packed at the beginning of * the "page" and without this there isn't any way to know find the - * lu_dirpage header is if client and server CFS_PAGE_SIZE differ. + * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ. */ #define LU_PAGE_SHIFT 12 #define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT) #define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1)) -#define LU_PAGE_COUNT (1 << (CFS_PAGE_SHIFT - LU_PAGE_SHIFT)) +#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT)) /** @} lu_dir */ diff --git a/lustre/include/lustre_capa.h b/lustre/include/lustre_capa.h index abc1033..6c31c45 100644 --- a/lustre/include/lustre_capa.h +++ b/lustre/include/lustre_capa.h @@ -179,7 +179,7 @@ typedef int (* renew_capa_cb_t)(struct obd_capa *, struct lustre_capa *); extern cfs_list_t capa_list[]; extern spinlock_t capa_lock; extern int capa_count[]; -extern cfs_mem_cache_t *capa_cachep; +extern struct kmem_cache *capa_cachep; cfs_hlist_head_t *init_capa_hash(void); void cleanup_capa_hash(cfs_hlist_head_t *hash); diff --git a/lustre/include/lustre_debug.h b/lustre/include/lustre_debug.h index f047347..cdb2f9d 100644 --- a/lustre/include/lustre_debug.h +++ b/lustre/include/lustre_debug.h @@ -57,9 +57,9 @@ #define ASSERT_MAX_SIZE_MB 60000ULL #define ASSERT_PAGE_INDEX(index, OP) \ -do { if (index > ASSERT_MAX_SIZE_MB << (20 - CFS_PAGE_SHIFT)) { \ +do { if (index > ASSERT_MAX_SIZE_MB << (20 - PAGE_CACHE_SHIFT)) { \ CERROR("bad page index %lu > %llu\n", index, \ - ASSERT_MAX_SIZE_MB << (20 - CFS_PAGE_SHIFT)); \ + ASSERT_MAX_SIZE_MB << (20 - PAGE_CACHE_SHIFT)); \ libcfs_debug = ~0UL; \ OP; \ }} while(0) diff --git a/lustre/include/lustre_disk.h b/lustre/include/lustre_disk.h index eef6a04..5081819 100644 --- a/lustre/include/lustre_disk.h +++ b/lustre/include/lustre_disk.h @@ -262,14 +262,14 @@ struct lustre_mount_data { /* * This limit is arbitrary (131072 clients on x86), but it is convenient to use - * 2^n * CFS_PAGE_SIZE * 8 for the number of bits that fit an order-n allocation. + * 2^n * PAGE_CACHE_SIZE * 8 for the number of bits that fit an order-n allocation. * If we need more than 131072 clients (order-2 allocation on x86) then this * should become an array of single-page pointers that are allocated on demand. 
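The sizing rule in the comment above is worth spelling out: an order-n allocation spans 2^n pages, so it holds 2^n * PAGE_CACHE_SIZE * 8 bits. A small illustrative macro (not part of this patch) and the resulting numbers for 4 KiB pages:

/* bits of bitmap that fit in one order-n allocation */
#define BITS_PER_ORDER_N(n)     ((PAGE_CACHE_SIZE << (n)) * 8)

/*
 * With PAGE_CACHE_SIZE == 4096:
 *   BITS_PER_ORDER_N(0) ==  32768
 *   BITS_PER_ORDER_N(2) == 131072   (the 128K-client case on x86)
 * A 64 KiB page already gives 524288 bits at order 0, which is why the
 * #if just below falls back to PAGE_CACHE_SIZE * 8 for LR_MAX_CLIENTS.
 */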
*/ -#if (128 * 1024UL) > (CFS_PAGE_SIZE * 8) +#if (128 * 1024UL) > (PAGE_CACHE_SIZE * 8) #define LR_MAX_CLIENTS (128 * 1024UL) #else -#define LR_MAX_CLIENTS (CFS_PAGE_SIZE * 8) +#define LR_MAX_CLIENTS (PAGE_CACHE_SIZE * 8) #endif /** COMPAT_146: this is an OST (temporary) */ diff --git a/lustre/include/lustre_idmap.h b/lustre/include/lustre_idmap.h index b64a71b..f406d63 100644 --- a/lustre/include/lustre_idmap.h +++ b/lustre/include/lustre_idmap.h @@ -49,7 +49,7 @@ #include -#define CFS_NGROUPS_PER_BLOCK ((int)(CFS_PAGE_SIZE / sizeof(gid_t))) +#define CFS_NGROUPS_PER_BLOCK ((int)(PAGE_CACHE_SIZE / sizeof(gid_t))) #define CFS_GROUP_AT(gi, i) \ ((gi)->blocks[(i) / CFS_NGROUPS_PER_BLOCK][(i) % CFS_NGROUPS_PER_BLOCK]) diff --git a/lustre/include/lustre_lib.h b/lustre/include/lustre_lib.h index 92fc0d1..bc876ab 100644 --- a/lustre/include/lustre_lib.h +++ b/lustre/include/lustre_lib.h @@ -363,7 +363,7 @@ static inline int obd_ioctl_getdata(char **buf, int *len, void *arg) int offset = 0; ENTRY; - err = cfs_copy_from_user(&hdr, (void *)arg, sizeof(hdr)); + err = copy_from_user(&hdr, (void *)arg, sizeof(hdr)); if (err) RETURN(err); @@ -392,7 +392,7 @@ static inline int obd_ioctl_getdata(char **buf, int *len, void *arg) *len = hdr.ioc_len; data = (struct obd_ioctl_data *)*buf; - err = cfs_copy_from_user(*buf, (void *)arg, hdr.ioc_len); + err = copy_from_user(*buf, (void *)arg, hdr.ioc_len); if (err) { OBD_FREE_LARGE(*buf, hdr.ioc_len); RETURN(err); @@ -428,10 +428,10 @@ static inline int obd_ioctl_getdata(char **buf, int *len, void *arg) static inline int obd_ioctl_popdata(void *arg, void *data, int len) { - int err = cfs_copy_to_user(arg, data, len); - if (err) - err = -EFAULT; - return err; + int err = copy_to_user(arg, data, len); + if (err) + err = -EFAULT; + return err; } #endif diff --git a/lustre/include/lustre_net.h b/lustre/include/lustre_net.h index 627acce..724fe0e4 100644 --- a/lustre/include/lustre_net.h +++ b/lustre/include/lustre_net.h @@ -108,13 +108,13 @@ */ #define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS) #define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS) -#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> CFS_PAGE_SHIFT) +#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) #define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS) #define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) -#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> CFS_PAGE_SHIFT) +#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) #define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE -#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> CFS_PAGE_SHIFT) +#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) #define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! 
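The "check our arithmetic here with cpp" trick used in the checks just below works because, when PAGE_SIZE is a compile-time constant, PAGE_CACHE_SIZE and PAGE_CACHE_SHIFT expand to integer literals that ordinary #if arithmetic can evaluate at build time. A self-contained sketch of the same pattern (the MY_* names are illustrative only):

#include <linux/pagemap.h>

#define MY_BRW_BITS     20
#define MY_BRW_SIZE     (1 << MY_BRW_BITS)
#define MY_BRW_PAGES    (MY_BRW_SIZE >> PAGE_CACHE_SHIFT)

#if (MY_BRW_SIZE != (MY_BRW_PAGES * PAGE_CACHE_SIZE))
# error "MY_BRW_SIZE must be an exact multiple of the page size"
#endif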
*/ @@ -122,8 +122,8 @@ # if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0) # error "PTLRPC_MAX_BRW_PAGES isn't a power of two" # endif -# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE)) -# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE" +# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE)) +# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE" # endif # if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT) # error "PTLRPC_MAX_BRW_SIZE too big" @@ -465,7 +465,7 @@ */ /* depress threads factor for VM with small memory size */ #define OSS_THR_FACTOR min_t(int, 8, \ - CFS_NUM_CACHEPAGES >> (28 - CFS_PAGE_SHIFT)) + NUM_CACHEPAGES >> (28 - PAGE_CACHE_SHIFT)) #define OSS_NTHRS_INIT (PTLRPC_NTHRS_INIT + 1) #define OSS_NTHRS_BASE 64 #define OSS_NTHRS_MAX 512 @@ -1567,7 +1567,7 @@ struct nrs_orr_key { * id number, so this _should_ be more than enough for the maximum number of * CPTs on any system. If it does happen that this statement is incorrect, * nrs_orr_genobjname() will inevitably yield a non-unique name and cause - * cfs_mem_cache_create() to complain (on Linux), so the erroneous situation + * kmem_cache_create() to complain (on Linux), so the erroneous situation * will hopefully not go unnoticed. */ #define NRS_ORR_OBJ_NAME_MAX (sizeof("nrs_orr_reg_") + 3) @@ -1579,7 +1579,7 @@ struct nrs_orr_data { struct ptlrpc_nrs_resource od_res; cfs_binheap_t *od_binheap; cfs_hash_t *od_obj_hash; - cfs_mem_cache_t *od_cache; + struct kmem_cache *od_cache; /** * Used when a new scheduling round commences, in order to synchronize * all object or OST batches with the new round number. @@ -2983,16 +2983,16 @@ static inline void ptlrpc_free_bulk_nopin(struct ptlrpc_bulk_desc *bulk) __ptlrpc_free_bulk(bulk, 0); } void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, - cfs_page_t *page, int pageoffset, int len, int); + struct page *page, int pageoffset, int len, int); static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc, - cfs_page_t *page, int pageoffset, + struct page *page, int pageoffset, int len) { __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1); } static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc, - cfs_page_t *page, int pageoffset, + struct page *page, int pageoffset, int len) { __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0); diff --git a/lustre/include/obd.h b/lustre/include/obd.h index 7e866e9..9747623 100644 --- a/lustre/include/obd.h +++ b/lustre/include/obd.h @@ -216,32 +216,32 @@ static inline int lov_lum_lsm_cmp(struct lov_user_md *lum, } static inline int lov_lum_swab_if_needed(struct lov_user_md_v3 *lumv3, - int *lmm_magic, - struct lov_user_md *lum) + int *lmm_magic, + struct lov_user_md *lum) { - if (lum && cfs_copy_from_user(lumv3, lum,sizeof(struct lov_user_md_v1))) - return -EFAULT; - - *lmm_magic = lumv3->lmm_magic; - - if (*lmm_magic == __swab32(LOV_USER_MAGIC_V1)) { - lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lumv3); - *lmm_magic = LOV_USER_MAGIC_V1; - } else if (*lmm_magic == LOV_USER_MAGIC_V3) { - if (lum && cfs_copy_from_user(lumv3, lum, sizeof(*lumv3))) - return -EFAULT; - } else if (*lmm_magic == __swab32(LOV_USER_MAGIC_V3)) { - if (lum && cfs_copy_from_user(lumv3, lum, sizeof(*lumv3))) - return -EFAULT; - lustre_swab_lov_user_md_v3(lumv3); - *lmm_magic = LOV_USER_MAGIC_V3; - } else if (*lmm_magic != LOV_USER_MAGIC_V1) { - CDEBUG(D_IOCTL, - "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n", - 
*lmm_magic, LOV_USER_MAGIC_V1, LOV_USER_MAGIC_V3); - return -EINVAL; - } - return 0; + if (lum && copy_from_user(lumv3, lum, sizeof(struct lov_user_md_v1))) + return -EFAULT; + + *lmm_magic = lumv3->lmm_magic; + + if (*lmm_magic == __swab32(LOV_USER_MAGIC_V1)) { + lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lumv3); + *lmm_magic = LOV_USER_MAGIC_V1; + } else if (*lmm_magic == LOV_USER_MAGIC_V3) { + if (lum && copy_from_user(lumv3, lum, sizeof(*lumv3))) + return -EFAULT; + } else if (*lmm_magic == __swab32(LOV_USER_MAGIC_V3)) { + if (lum && copy_from_user(lumv3, lum, sizeof(*lumv3))) + return -EFAULT; + lustre_swab_lov_user_md_v3(lumv3); + *lmm_magic = LOV_USER_MAGIC_V3; + } else if (*lmm_magic != LOV_USER_MAGIC_V1) { + CDEBUG(D_IOCTL, + "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n", + *lmm_magic, LOV_USER_MAGIC_V1, LOV_USER_MAGIC_V3); + return -EINVAL; + } + return 0; } void lov_stripe_lock(struct lov_stripe_md *md); @@ -259,10 +259,10 @@ struct obd_type { }; struct brw_page { - obd_off off; - cfs_page_t *pg; - int count; - obd_flag flag; + obd_off off; + struct page *pg; + int count; + obd_flag flag; }; /* Individual type definitions */ @@ -462,7 +462,7 @@ struct client_obd { int cl_grant_shrink_interval; /* seconds */ /* A chunk is an optimal size used by osc_extent to determine - * the extent size. A chunk is max(CFS_PAGE_SIZE, OST block size) */ + * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) */ int cl_chunkbits; int cl_chunk; int cl_extent_tax; /* extent overhead, by bytes */ @@ -757,7 +757,7 @@ struct niobuf_local { __u32 lnb_page_offset; __u32 len; __u32 flags; - cfs_page_t *page; + struct page *page; struct dentry *dentry; int lnb_grant_used; int rc; @@ -1702,7 +1702,7 @@ bad_format: static inline int cli_brw_size(struct obd_device *obd) { LASSERT(obd != NULL); - return obd->u.cli.cl_max_pages_per_rpc << CFS_PAGE_SHIFT; + return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; } #endif /* __OBD_H */ diff --git a/lustre/include/obd_class.h b/lustre/include/obd_class.h index f3be6cc..029b21e 100644 --- a/lustre/include/obd_class.h +++ b/lustre/include/obd_class.h @@ -2238,11 +2238,11 @@ extern int obd_init_caches(void); extern void obd_cleanup_caches(void); /* support routines */ -extern cfs_mem_cache_t *obdo_cachep; +extern struct kmem_cache *obdo_cachep; #define OBDO_ALLOC(ptr) \ do { \ - OBD_SLAB_ALLOC_PTR_GFP((ptr), obdo_cachep, CFS_ALLOC_IO); \ + OBD_SLAB_ALLOC_PTR_GFP((ptr), obdo_cachep, __GFP_IO); \ } while(0) #define OBDO_FREE(ptr) \ diff --git a/lustre/include/obd_support.h b/lustre/include/obd_support.h index 9739e0e..f572a63 100644 --- a/lustre/include/obd_support.h +++ b/lustre/include/obd_support.h @@ -604,13 +604,12 @@ static inline void obd_pages_sub(int order) #define __OBD_MALLOC_VERBOSE(ptr, cptab, cpt, size, flags) \ do { \ (ptr) = (cptab) == NULL ? 
\ - cfs_alloc(size, flags) : \ - cfs_cpt_malloc(cptab, cpt, size, flags); \ + kmalloc(size, flags | __GFP_ZERO) : \ + cfs_cpt_malloc(cptab, cpt, size, flags | __GFP_ZERO); \ if (unlikely((ptr) == NULL)) { \ CERROR("kmalloc of '" #ptr "' (%d bytes) failed at %s:%d\n", \ (int)(size), __FILE__, __LINE__); \ } else { \ - memset(ptr, 0, size); \ CDEBUG(D_MALLOC, "kmalloced '" #ptr "': %d at %p\n", \ (int)(size), ptr); \ } \ @@ -619,7 +618,7 @@ do { \ #else /* this version is for the kernel and liblustre */ #define OBD_FREE_RTN0(ptr) \ ({ \ - cfs_free(ptr); \ + kfree(ptr); \ (ptr) = NULL; \ 0; \ }) @@ -627,14 +626,13 @@ do { \ #define __OBD_MALLOC_VERBOSE(ptr, cptab, cpt, size, flags) \ do { \ (ptr) = (cptab) == NULL ? \ - cfs_alloc(size, flags) : \ - cfs_cpt_malloc(cptab, cpt, size, flags); \ + kmalloc(size, flags | __GFP_ZERO) : \ + cfs_cpt_malloc(cptab, cpt, size, flags | __GFP_ZERO); \ if (likely((ptr) != NULL && \ (!HAS_FAIL_ALLOC_FLAG || obd_alloc_fail_rate == 0 || \ !obd_alloc_fail(ptr, #ptr, "km", size, \ __FILE__, __LINE__) || \ OBD_FREE_RTN0(ptr)))){ \ - memset(ptr, 0, size); \ OBD_ALLOC_POST(ptr, size, "kmalloced"); \ } \ } while (0) @@ -643,8 +641,8 @@ do { \ #define OBD_ALLOC_GFP(ptr, size, gfp_mask) \ __OBD_MALLOC_VERBOSE(ptr, NULL, 0, size, gfp_mask) -#define OBD_ALLOC(ptr, size) OBD_ALLOC_GFP(ptr, size, CFS_ALLOC_IO) -#define OBD_ALLOC_WAIT(ptr, size) OBD_ALLOC_GFP(ptr, size, CFS_ALLOC_STD) +#define OBD_ALLOC(ptr, size) OBD_ALLOC_GFP(ptr, size, __GFP_IO) +#define OBD_ALLOC_WAIT(ptr, size) OBD_ALLOC_GFP(ptr, size, GFP_IOFS) #define OBD_ALLOC_PTR(ptr) OBD_ALLOC(ptr, sizeof *(ptr)) #define OBD_ALLOC_PTR_WAIT(ptr) OBD_ALLOC_WAIT(ptr, sizeof *(ptr)) @@ -652,7 +650,7 @@ do { \ __OBD_MALLOC_VERBOSE(ptr, cptab, cpt, size, gfp_mask) #define OBD_CPT_ALLOC(ptr, cptab, cpt, size) \ - OBD_CPT_ALLOC_GFP(ptr, cptab, cpt, size, CFS_ALLOC_IO) + OBD_CPT_ALLOC_GFP(ptr, cptab, cpt, size, __GFP_IO) #define OBD_CPT_ALLOC_PTR(ptr, cptab, cpt) \ OBD_CPT_ALLOC(ptr, cptab, cpt, sizeof *(ptr)) @@ -660,7 +658,7 @@ do { \ # define __OBD_VMALLOC_VEROBSE(ptr, cptab, cpt, size) \ do { \ (ptr) = cptab == NULL ? \ - cfs_alloc_large(size) : \ + vmalloc(size) : \ cfs_cpt_vmalloc(cptab, cpt, size); \ if (unlikely((ptr) == NULL)) { \ CERROR("vmalloc of '" #ptr "' (%d bytes) failed\n", \ @@ -687,7 +685,7 @@ do { \ * since vmalloc in Linux doesn't perform well on multi-cores system, calling * vmalloc in critical path would hurt peformance badly. See LU-66. */ -#define OBD_ALLOC_BIG (4 * CFS_PAGE_SIZE) +#define OBD_ALLOC_BIG (4 * PAGE_CACHE_SIZE) #define OBD_ALLOC_LARGE(ptr, size) \ do { \ @@ -733,7 +731,7 @@ do { \ #endif #ifdef POISON_BULK -#define POISON_PAGE(page, val) do { memset(kmap(page), val, CFS_PAGE_SIZE); \ +#define POISON_PAGE(page, val) do { memset(kmap(page), val, PAGE_CACHE_SIZE); \ kunmap(page); } while (0) #else #define POISON_PAGE(page, val) do { } while (0) @@ -743,7 +741,7 @@ do { \ #define OBD_FREE(ptr, size) \ do { \ OBD_FREE_PRE(ptr, size, "kfreed"); \ - cfs_free(ptr); \ + kfree(ptr); \ POISON_PTR(ptr); \ } while(0) @@ -767,7 +765,7 @@ do { \ #define OBD_VFREE(ptr, size) \ do { \ OBD_FREE_PRE(ptr, size, "vfreed"); \ - cfs_free_large(ptr); \ + vfree(ptr); \ POISON_PTR(ptr); \ } while (0) @@ -776,23 +774,22 @@ do { \ * love to assert on that, but slab.c keeps kmem_cache_s all to itself. 
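The memset() removals in the allocation macros above are safe because __GFP_ZERO asks the allocator itself to hand back zeroed memory; kzalloc() is simply kmalloc() with that flag OR-ed in. A minimal sketch (GFP_NOFS is chosen here only for illustration; the patch itself maps CFS_ALLOC_IO to __GFP_IO and CFS_ALLOC_STD to GFP_IOFS):

#include <linux/slab.h>

static void *zeroed_alloc(size_t len)
{
        /* equivalent to kmalloc(len, GFP_NOFS) + memset(p, 0, len),
         * which is exactly the two-step pattern the macros drop */
        return kmalloc(len, GFP_NOFS | __GFP_ZERO);
}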
*/ #define OBD_SLAB_FREE_RTN0(ptr, slab) \ ({ \ - cfs_mem_cache_free((slab), (ptr)); \ + kmem_cache_free((slab), (ptr)); \ (ptr) = NULL; \ 0; \ }) #define __OBD_SLAB_ALLOC_VERBOSE(ptr, slab, cptab, cpt, size, type) \ do { \ - LASSERT(ergo((type) != CFS_ALLOC_ATOMIC, !cfs_in_interrupt())); \ + LASSERT(ergo((type) != GFP_ATOMIC, !cfs_in_interrupt())); \ (ptr) = (cptab) == NULL ? \ - cfs_mem_cache_alloc(slab, type) : \ - cfs_mem_cache_cpt_alloc(slab, cptab, cpt, type); \ + kmem_cache_alloc(slab, type | __GFP_ZERO) : \ + cfs_mem_cache_cpt_alloc(slab, cptab, cpt, type | __GFP_ZERO); \ if (likely((ptr) != NULL && \ (!HAS_FAIL_ALLOC_FLAG || obd_alloc_fail_rate == 0 || \ !obd_alloc_fail(ptr, #ptr, "slab-", size, \ __FILE__, __LINE__) || \ OBD_SLAB_FREE_RTN0(ptr, slab)))) { \ - memset(ptr, 0, size); \ OBD_ALLOC_POST(ptr, size, "slab-alloced"); \ } \ } while(0) @@ -807,15 +804,15 @@ do { \ #define OBD_SLAB_FREE(ptr, slab, size) \ do { \ OBD_FREE_PRE(ptr, size, "slab-freed"); \ - cfs_mem_cache_free(slab, ptr); \ + kmem_cache_free(slab, ptr); \ POISON_PTR(ptr); \ } while(0) #define OBD_SLAB_ALLOC(ptr, slab, size) \ - OBD_SLAB_ALLOC_GFP(ptr, slab, size, CFS_ALLOC_IO) + OBD_SLAB_ALLOC_GFP(ptr, slab, size, __GFP_IO) #define OBD_SLAB_CPT_ALLOC(ptr, slab, cptab, cpt, size) \ - OBD_SLAB_CPT_ALLOC_GFP(ptr, slab, cptab, cpt, size, CFS_ALLOC_IO) + OBD_SLAB_CPT_ALLOC_GFP(ptr, slab, cptab, cpt, size, __GFP_IO) #define OBD_SLAB_ALLOC_PTR(ptr, slab) \ OBD_SLAB_ALLOC(ptr, slab, sizeof *(ptr)) @@ -839,17 +836,17 @@ do { \ #define __OBD_PAGE_ALLOC_VERBOSE(ptr, cptab, cpt, gfp_mask) \ do { \ (ptr) = (cptab) == NULL ? \ - cfs_alloc_page(gfp_mask) : \ + alloc_page(gfp_mask) : \ cfs_page_cpt_alloc(cptab, cpt, gfp_mask); \ if (unlikely((ptr) == NULL)) { \ CERROR("alloc_pages of '" #ptr "' %d page(s) / "LPU64" bytes "\ "failed\n", (int)1, \ - (__u64)(1 << CFS_PAGE_SHIFT)); \ + (__u64)(1 << PAGE_CACHE_SHIFT)); \ CERROR(LPU64" total bytes and "LPU64" total pages " \ "("LPU64" bytes) allocated by Lustre, " \ "%d total bytes by LNET\n", \ obd_memory_sum(), \ - obd_pages_sum() << CFS_PAGE_SHIFT, \ + obd_pages_sum() << PAGE_CACHE_SHIFT, \ obd_pages_sum(), \ cfs_atomic_read(&libcfs_kmemory)); \ } else { \ @@ -857,7 +854,7 @@ do { \ CDEBUG(D_MALLOC, "alloc_pages '" #ptr "': %d page(s) / " \ LPU64" bytes at %p.\n", \ (int)1, \ - (__u64)(1 << CFS_PAGE_SHIFT), ptr); \ + (__u64)(1 << PAGE_CACHE_SHIFT), ptr); \ } \ } while (0) @@ -872,9 +869,9 @@ do { \ obd_pages_sub(0); \ CDEBUG(D_MALLOC, "free_pages '" #ptr "': %d page(s) / "LPU64" bytes " \ "at %p.\n", \ - (int)1, (__u64)(1 << CFS_PAGE_SHIFT), \ + (int)1, (__u64)(1 << PAGE_CACHE_SHIFT), \ ptr); \ - cfs_free_page(ptr); \ + __free_page(ptr); \ (ptr) = (void *)0xdeadbeef; \ } while (0) diff --git a/lustre/lclient/lcommon_cl.c b/lustre/lclient/lcommon_cl.c index bc97877..96b8b3e 100644 --- a/lustre/lclient/lcommon_cl.c +++ b/lustre/lclient/lcommon_cl.c @@ -85,11 +85,11 @@ const struct cl_req_operations ccc_req_ops; * ccc_ prefix stands for "Common Client Code". 
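For the OBD_PAGE_ALLOC/OBD_PAGE_FREE conversion above, the underlying kernel calls are alloc_page() and __free_page(). A minimal round trip, assuming a lowmem allocation; page_roundtrip() is a hypothetical demo, not code from this change:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/errno.h>

static int page_roundtrip(void)
{
        struct page *pg = alloc_page(GFP_KERNEL);

        if (pg == NULL)
                return -ENOMEM;
        /* no __GFP_HIGHMEM in the mask, so the page has a permanent
         * kernel mapping and page_address() is safe without kmap() */
        memset(page_address(pg), 0, PAGE_CACHE_SIZE);
        __free_page(pg);
        return 0;
}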
*/ -static cfs_mem_cache_t *ccc_lock_kmem; -static cfs_mem_cache_t *ccc_object_kmem; -static cfs_mem_cache_t *ccc_thread_kmem; -static cfs_mem_cache_t *ccc_session_kmem; -static cfs_mem_cache_t *ccc_req_kmem; +static struct kmem_cache *ccc_lock_kmem; +static struct kmem_cache *ccc_object_kmem; +static struct kmem_cache *ccc_thread_kmem; +static struct kmem_cache *ccc_session_kmem; +static struct kmem_cache *ccc_req_kmem; static struct lu_kmem_descr ccc_caches[] = { { @@ -133,7 +133,7 @@ void *ccc_key_init(const struct lu_context *ctx, { struct ccc_thread_info *info; - OBD_SLAB_ALLOC_PTR_GFP(info, ccc_thread_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(info, ccc_thread_kmem, __GFP_IO); if (info == NULL) info = ERR_PTR(-ENOMEM); return info; @@ -151,7 +151,7 @@ void *ccc_session_key_init(const struct lu_context *ctx, { struct ccc_session *session; - OBD_SLAB_ALLOC_PTR_GFP(session, ccc_session_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(session, ccc_session_kmem, __GFP_IO); if (session == NULL) session = ERR_PTR(-ENOMEM); return session; @@ -269,7 +269,7 @@ int ccc_req_init(const struct lu_env *env, struct cl_device *dev, struct ccc_req *vrq; int result; - OBD_SLAB_ALLOC_PTR_GFP(vrq, ccc_req_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(vrq, ccc_req_kmem, __GFP_IO); if (vrq != NULL) { cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops); result = 0; @@ -345,7 +345,7 @@ struct lu_object *ccc_object_alloc(const struct lu_env *env, struct ccc_object *vob; struct lu_object *obj; - OBD_SLAB_ALLOC_PTR_GFP(vob, ccc_object_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(vob, ccc_object_kmem, __GFP_IO); if (vob != NULL) { struct cl_object_header *hdr; @@ -414,7 +414,7 @@ int ccc_lock_init(const struct lu_env *env, CLOBINVRNT(env, obj, ccc_object_invariant(obj)); - OBD_SLAB_ALLOC_PTR_GFP(clk, ccc_lock_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(clk, ccc_lock_kmem, __GFP_IO); if (clk != NULL) { cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops); result = 0; @@ -479,7 +479,7 @@ static void ccc_object_size_unlock(struct cl_object *obj) * */ -cfs_page_t *ccc_page_vmpage(const struct lu_env *env, +struct page *ccc_page_vmpage(const struct lu_env *env, const struct cl_page_slice *slice) { return cl2vm_page(slice); @@ -885,12 +885,14 @@ int ccc_prep_size(const struct lu_env *env, struct cl_object *obj, * kernel will check such case correctly. * linux-2.6.18-128.1.1 miss to do that. * --bug 17336 */ - loff_t size = cl_isize_read(inode); - unsigned long cur_index = start >> CFS_PAGE_SHIFT; - - if ((size == 0 && cur_index != 0) || - (((size - 1) >> CFS_PAGE_SHIFT) < cur_index)) - *exceed = 1; + loff_t size = cl_isize_read(inode); + unsigned long cur_index = start >> + PAGE_CACHE_SHIFT; + + if ((size == 0 && cur_index != 0) || + (((size - 1) >> PAGE_CACHE_SHIFT) < + cur_index)) + *exceed = 1; } return result; } else { @@ -1096,7 +1098,7 @@ struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice) return container_of0(slice, struct ccc_req, crq_cl); } -cfs_page_t *cl2vm_page(const struct cl_page_slice *slice) +struct page *cl2vm_page(const struct cl_page_slice *slice) { return cl2ccc_page(slice)->cpg_page; } @@ -1126,7 +1128,7 @@ struct inode *ccc_object_inode(const struct cl_object *obj) * additional reference to the resulting page. This is an unsafe version of * cl_vmpage_page() that can only be used under vmpage lock. 
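The cache declarations above show the type change; the matching lifecycle change (item 5 of this patch) is that kmem_cache_create() takes five arguments ending in a constructor pointer, and kmem_cache_destroy() returns void. A hedged sketch with illustrative names:

#include <linux/slab.h>
#include <linux/errno.h>

struct demo_obj { int x; };

static struct kmem_cache *demo_kmem;

static int demo_cache_init(void)
{
        demo_kmem = kmem_cache_create("demo_cache", sizeof(struct demo_obj),
                                      0, SLAB_HWCACHE_ALIGN, NULL);
        return demo_kmem == NULL ? -ENOMEM : 0;
}

static void demo_cache_fini(void)
{
        /* returns void, unlike cfs_mem_cache_destroy(); this is why
         * ldlm_exit() below drops its rc checks on cache destruction */
        kmem_cache_destroy(demo_kmem);
}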
*/ -struct cl_page *ccc_vmpage_page_transient(cfs_page_t *vmpage) +struct cl_page *ccc_vmpage_page_transient(struct page *vmpage) { KLASSERT(PageLocked(vmpage)); return (struct cl_page *)vmpage->private; diff --git a/lustre/ldlm/ldlm_extent.c b/lustre/ldlm/ldlm_extent.c index c5c56ad..dbcbc72 100644 --- a/lustre/ldlm/ldlm_extent.c +++ b/lustre/ldlm/ldlm_extent.c @@ -98,7 +98,7 @@ static void ldlm_extent_internal_policy_fixup(struct ldlm_lock *req, * the client requested. Also we need to make sure it's also server * page size aligned otherwise a server page can be covered by two * write locks. */ - mask = CFS_PAGE_SIZE; + mask = PAGE_CACHE_SIZE; req_align = (req_end + 1) | req_start; if (req_align != 0 && (req_align & (mask - 1)) == 0) { while ((req_align & mask) == 0) @@ -839,14 +839,14 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms) } EXPORT_SYMBOL(ldlm_extent_shift_kms); -cfs_mem_cache_t *ldlm_interval_slab; +struct kmem_cache *ldlm_interval_slab; struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock) { struct ldlm_interval *node; ENTRY; LASSERT(lock->l_resource->lr_type == LDLM_EXTENT); - OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, __GFP_IO); if (node == NULL) RETURN(NULL); diff --git a/lustre/ldlm/ldlm_internal.h b/lustre/ldlm/ldlm_internal.h index cb46c31..6b96b40 100644 --- a/lustre/ldlm/ldlm_internal.h +++ b/lustre/ldlm/ldlm_internal.h @@ -224,7 +224,7 @@ struct ldlm_state { }; /* interval tree, for LDLM_EXTENT. */ -extern cfs_mem_cache_t *ldlm_interval_slab; /* slab cache for ldlm_interval */ +extern struct kmem_cache *ldlm_interval_slab; /* slab cache for ldlm_interval */ extern void ldlm_interval_attach(struct ldlm_interval *n, struct ldlm_lock *l); extern struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l); extern struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock); diff --git a/lustre/ldlm/ldlm_lib.c b/lustre/ldlm/ldlm_lib.c index 517b8e3..8879797 100644 --- a/lustre/ldlm/ldlm_lib.c +++ b/lustre/ldlm/ldlm_lib.c @@ -342,9 +342,9 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) cli->cl_dirty = 0; cli->cl_avail_grant = 0; /* FIXME: Should limit this for the sum of all cl_dirty_max. */ - cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024; - if (cli->cl_dirty_max >> CFS_PAGE_SHIFT > cfs_num_physpages / 8) - cli->cl_dirty_max = cfs_num_physpages << (CFS_PAGE_SHIFT - 3); + cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024; + if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > num_physpages / 8) + cli->cl_dirty_max = num_physpages << (PAGE_CACHE_SHIFT - 3); CFS_INIT_LIST_HEAD(&cli->cl_cache_waiters); CFS_INIT_LIST_HEAD(&cli->cl_loi_ready_list); CFS_INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list); @@ -390,17 +390,17 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) * 1MB until we know what the performance looks like. * In the future this should likely be increased. 
LU-1431 */ cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES, - LNET_MTU >> CFS_PAGE_SHIFT); - - if (!strcmp(name, LUSTRE_MDC_NAME)) { - cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT; - } else if (cfs_num_physpages >> (20 - CFS_PAGE_SHIFT) <= 128 /* MB */) { - cli->cl_max_rpcs_in_flight = 2; - } else if (cfs_num_physpages >> (20 - CFS_PAGE_SHIFT) <= 256 /* MB */) { - cli->cl_max_rpcs_in_flight = 3; - } else if (cfs_num_physpages >> (20 - CFS_PAGE_SHIFT) <= 512 /* MB */) { - cli->cl_max_rpcs_in_flight = 4; - } else { + LNET_MTU >> PAGE_CACHE_SHIFT); + + if (!strcmp(name, LUSTRE_MDC_NAME)) { + cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT; + } else if (num_physpages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) { + cli->cl_max_rpcs_in_flight = 2; + } else if (num_physpages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) { + cli->cl_max_rpcs_in_flight = 3; + } else if (num_physpages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) { + cli->cl_max_rpcs_in_flight = 4; + } else { if (osc_on_mdt(obddev->obd_name)) cli->cl_max_rpcs_in_flight = MDS_OSC_MAX_RIF_DEFAULT; else diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c index 49d0034..1e17527 100644 --- a/lustre/ldlm/ldlm_lock.c +++ b/lustre/ldlm/ldlm_lock.c @@ -156,7 +156,7 @@ char *ldlm_it2str(int it) } EXPORT_SYMBOL(ldlm_it2str); -extern cfs_mem_cache_t *ldlm_lock_slab; +extern struct kmem_cache *ldlm_lock_slab; #ifdef HAVE_SERVER_SUPPORT static ldlm_processing_policy ldlm_processing_policy_table[] = { @@ -475,7 +475,7 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource) if (resource == NULL) LBUG(); - OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, __GFP_IO); if (lock == NULL) RETURN(NULL); @@ -1688,7 +1688,7 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns, * have to allocate the interval node early otherwise we can't regrant * this lock in the future. - jay */ if (!local && (*flags & LDLM_FL_REPLAY) && res->lr_type == LDLM_EXTENT) - OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, __GFP_IO); lock_res_and_lock(lock); if (local && lock->l_req_mode == lock->l_granted_mode) { @@ -2313,9 +2313,8 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode, /* I can't check the type of lock here because the bitlock of lock * is not held here, so do the allocation blindly. 
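The ladder in client_obd_setup() above sizes RPC concurrency from RAM: num_physpages >> (20 - PAGE_CACHE_SHIFT) converts a page count into megabytes (with 4 KiB pages that is a shift by 8, i.e. 256 pages per MB). A compact sketch of the same computation; note num_physpages is era-specific and was later replaced by totalram_pages in mainline:

#include <linux/mm.h>
#include <linux/pagemap.h>

static int pick_rpcs_in_flight(void)
{
        unsigned long ram_mb = num_physpages >> (20 - PAGE_CACHE_SHIFT);

        if (ram_mb <= 128)
                return 2;
        if (ram_mb <= 256)
                return 3;
        return 4;       /* the real ladder continues past 512 MB */
}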
-jay */ - OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, CFS_ALLOC_IO); - if (node == NULL) - /* Actually, this causes LUSTRE_EDEADLK to be returned */ + OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, __GFP_IO); + if (node == NULL) /* Actually, this causes EDEADLOCK to be returned */ RETURN(NULL); LASSERTF((new_mode == LCK_PW && lock->l_granted_mode == LCK_PR), diff --git a/lustre/ldlm/ldlm_lockd.c b/lustre/ldlm/ldlm_lockd.c index 002018c..a2d284c 100644 --- a/lustre/ldlm/ldlm_lockd.c +++ b/lustre/ldlm/ldlm_lockd.c @@ -60,8 +60,8 @@ static char *ldlm_cpts; CFS_MODULE_PARM(ldlm_cpts, "s", charp, 0444, "CPU partitions ldlm threads should run on"); -extern cfs_mem_cache_t *ldlm_resource_slab; -extern cfs_mem_cache_t *ldlm_lock_slab; +extern struct kmem_cache *ldlm_resource_slab; +extern struct kmem_cache *ldlm_lock_slab; static struct mutex ldlm_ref_mutex; static int ldlm_refcount; @@ -212,14 +212,15 @@ static int expired_lock_main(void *arg) lock = cfs_list_entry(expired->next, struct ldlm_lock, l_pending_chain); - if ((void *)lock < LP_POISON + CFS_PAGE_SIZE && - (void *)lock >= LP_POISON) { + if ((void *)lock < LP_POISON + PAGE_CACHE_SIZE && + (void *)lock >= LP_POISON) { spin_unlock_bh(&waiting_locks_spinlock); - CERROR("free lock on elt list %p\n", lock); - LBUG(); - } - cfs_list_del_init(&lock->l_pending_chain); - if ((void *)lock->l_export < LP_POISON + CFS_PAGE_SIZE && + CERROR("free lock on elt list %p\n", lock); + LBUG(); + } + cfs_list_del_init(&lock->l_pending_chain); + if ((void *)lock->l_export < + LP_POISON + PAGE_CACHE_SIZE && (void *)lock->l_export >= LP_POISON) { CERROR("lock with free export on elt list %p\n", lock->l_export); @@ -1960,7 +1961,7 @@ static inline void init_blwi(struct ldlm_bl_work_item *blwi, init_completion(&blwi->blwi_comp); CFS_INIT_LIST_HEAD(&blwi->blwi_head); - if (cfs_memory_pressure_get()) + if (memory_pressure_get()) blwi->blwi_mem_pressure = 1; blwi->blwi_ns = ns; @@ -2651,7 +2652,7 @@ static int ldlm_bl_thread_main(void *arg) ldlm_bl_thread_start(blp); if (blwi->blwi_mem_pressure) - cfs_memory_pressure_set(); + memory_pressure_set(); if (blwi->blwi_count) { int count; @@ -2669,7 +2670,7 @@ static int ldlm_bl_thread_main(void *arg) blwi->blwi_lock); } if (blwi->blwi_mem_pressure) - cfs_memory_pressure_clr(); + memory_pressure_clr(); if (blwi->blwi_flags & LCF_ASYNC) OBD_FREE(blwi, sizeof(*blwi)); @@ -3054,26 +3055,26 @@ int ldlm_init(void) mutex_init(&ldlm_ref_mutex); mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER)); mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT)); - ldlm_resource_slab = cfs_mem_cache_create("ldlm_resources", - sizeof(struct ldlm_resource), 0, - CFS_SLAB_HWCACHE_ALIGN); - if (ldlm_resource_slab == NULL) - return -ENOMEM; + ldlm_resource_slab = kmem_cache_create("ldlm_resources", + sizeof(struct ldlm_resource), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (ldlm_resource_slab == NULL) + return -ENOMEM; - ldlm_lock_slab = cfs_mem_cache_create("ldlm_locks", + ldlm_lock_slab = kmem_cache_create("ldlm_locks", sizeof(struct ldlm_lock), 0, - CFS_SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU); + SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU, NULL); if (ldlm_lock_slab == NULL) { - cfs_mem_cache_destroy(ldlm_resource_slab); + kmem_cache_destroy(ldlm_resource_slab); return -ENOMEM; } - ldlm_interval_slab = cfs_mem_cache_create("interval_node", + ldlm_interval_slab = kmem_cache_create("interval_node", sizeof(struct ldlm_interval), - 0, CFS_SLAB_HWCACHE_ALIGN); + 0, SLAB_HWCACHE_ALIGN, NULL); if (ldlm_interval_slab == NULL) { - 
cfs_mem_cache_destroy(ldlm_resource_slab); - cfs_mem_cache_destroy(ldlm_lock_slab); + kmem_cache_destroy(ldlm_resource_slab); + kmem_cache_destroy(ldlm_lock_slab); return -ENOMEM; } #if LUSTRE_TRACKS_LOCK_EXP_REFS @@ -3084,19 +3085,15 @@ int ldlm_init(void) void ldlm_exit(void) { - int rc; - if (ldlm_refcount) - CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount); - rc = cfs_mem_cache_destroy(ldlm_resource_slab); - LASSERTF(rc == 0, "couldn't free ldlm resource slab\n"); + if (ldlm_refcount) + CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount); + kmem_cache_destroy(ldlm_resource_slab); #ifdef __KERNEL__ - /* ldlm_lock_put() use RCU to call ldlm_lock_free, so need call - * synchronize_rcu() to wait a grace period elapsed, so that - * ldlm_lock_free() get a chance to be called. */ - synchronize_rcu(); + /* ldlm_lock_put() use RCU to call ldlm_lock_free, so need call + * synchronize_rcu() to wait a grace period elapsed, so that + * ldlm_lock_free() get a chance to be called. */ + synchronize_rcu(); #endif - rc = cfs_mem_cache_destroy(ldlm_lock_slab); - LASSERTF(rc == 0, "couldn't free ldlm lock slab\n"); - rc = cfs_mem_cache_destroy(ldlm_interval_slab); - LASSERTF(rc == 0, "couldn't free interval node slab\n"); + kmem_cache_destroy(ldlm_lock_slab); + kmem_cache_destroy(ldlm_interval_slab); } diff --git a/lustre/ldlm/ldlm_pool.c b/lustre/ldlm/ldlm_pool.c index c9aebdb..d32ec5f 100644 --- a/lustre/ldlm/ldlm_pool.c +++ b/lustre/ldlm/ldlm_pool.c @@ -114,7 +114,7 @@ /* * 50 ldlm locks for 1MB of RAM. */ -#define LDLM_POOL_HOST_L ((CFS_NUM_CACHEPAGES >> (20 - CFS_PAGE_SHIFT)) * 50) +#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50) /* * Maximal possible grant step plan in %. @@ -1063,8 +1063,8 @@ static int ldlm_pool_granted(struct ldlm_pool *pl) } static struct ptlrpc_thread *ldlm_pools_thread; -static struct cfs_shrinker *ldlm_pools_srv_shrinker; -static struct cfs_shrinker *ldlm_pools_cli_shrinker; +static struct shrinker *ldlm_pools_srv_shrinker; +static struct shrinker *ldlm_pools_cli_shrinker; static struct completion ldlm_pools_comp; /* @@ -1437,33 +1437,33 @@ static void ldlm_pools_thread_stop(void) int ldlm_pools_init(void) { - int rc; - ENTRY; + int rc; + ENTRY; - rc = ldlm_pools_thread_start(); - if (rc == 0) { - ldlm_pools_srv_shrinker = - cfs_set_shrinker(CFS_DEFAULT_SEEKS, - ldlm_pools_srv_shrink); - ldlm_pools_cli_shrinker = - cfs_set_shrinker(CFS_DEFAULT_SEEKS, - ldlm_pools_cli_shrink); - } - RETURN(rc); + rc = ldlm_pools_thread_start(); + if (rc == 0) { + ldlm_pools_srv_shrinker = + set_shrinker(DEFAULT_SEEKS, + ldlm_pools_srv_shrink); + ldlm_pools_cli_shrinker = + set_shrinker(DEFAULT_SEEKS, + ldlm_pools_cli_shrink); + } + RETURN(rc); } EXPORT_SYMBOL(ldlm_pools_init); void ldlm_pools_fini(void) { - if (ldlm_pools_srv_shrinker != NULL) { - cfs_remove_shrinker(ldlm_pools_srv_shrinker); - ldlm_pools_srv_shrinker = NULL; - } - if (ldlm_pools_cli_shrinker != NULL) { - cfs_remove_shrinker(ldlm_pools_cli_shrinker); - ldlm_pools_cli_shrinker = NULL; - } - ldlm_pools_thread_stop(); + if (ldlm_pools_srv_shrinker != NULL) { + remove_shrinker(ldlm_pools_srv_shrinker); + ldlm_pools_srv_shrinker = NULL; + } + if (ldlm_pools_cli_shrinker != NULL) { + remove_shrinker(ldlm_pools_cli_shrinker); + ldlm_pools_cli_shrinker = NULL; + } + ldlm_pools_thread_stop(); } EXPORT_SYMBOL(ldlm_pools_fini); #endif /* __KERNEL__ */ diff --git a/lustre/ldlm/ldlm_request.c b/lustre/ldlm/ldlm_request.c index cedd5e3..61ac593 100644 --- a/lustre/ldlm/ldlm_request.c +++ 
b/lustre/ldlm/ldlm_request.c @@ -723,7 +723,7 @@ static inline int ldlm_req_handles_avail(int req_size, int off) { int avail; - avail = min_t(int, LDLM_MAXREQSIZE, CFS_PAGE_SIZE - 512) - req_size; + avail = min_t(int, LDLM_MAXREQSIZE, PAGE_CACHE_SIZE - 512) - req_size; if (likely(avail >= 0)) avail /= (int)sizeof(struct lustre_handle); else diff --git a/lustre/ldlm/ldlm_resource.c b/lustre/ldlm/ldlm_resource.c index 3193766..0b74780 100644 --- a/lustre/ldlm/ldlm_resource.c +++ b/lustre/ldlm/ldlm_resource.c @@ -50,7 +50,7 @@ #include #include "ldlm_internal.h" -cfs_mem_cache_t *ldlm_resource_slab, *ldlm_lock_slab; +struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab; int ldlm_srv_namespace_nr = 0; int ldlm_cli_namespace_nr = 0; @@ -197,7 +197,7 @@ static int lprocfs_wr_lru_size(struct file *file, const char *buffer, int lru_resize; dummy[MAX_STRING_SIZE] = '\0'; - if (cfs_copy_from_user(dummy, buffer, MAX_STRING_SIZE)) + if (copy_from_user(dummy, buffer, MAX_STRING_SIZE)) return -EFAULT; if (strncmp(dummy, "clear", 5) == 0) { @@ -1069,7 +1069,7 @@ static struct ldlm_resource *ldlm_resource_new(void) struct ldlm_resource *res; int idx; - OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(res, ldlm_resource_slab, __GFP_IO); if (res == NULL) return NULL; diff --git a/lustre/lfsck/lfsck_namespace.c b/lustre/lfsck/lfsck_namespace.c index 7516ba5..a3a3948 100644 --- a/lustre/lfsck/lfsck_namespace.c +++ b/lustre/lfsck/lfsck_namespace.c @@ -386,7 +386,7 @@ static int lfsck_links_read(const struct lu_env *env, struct dt_object *obj, ldata->ld_buf = lu_buf_check_and_alloc(&lfsck_env_info(env)->lti_linkea_buf, - CFS_PAGE_SIZE); + PAGE_CACHE_SIZE); if (ldata->ld_buf->lb_buf == NULL) return -ENOMEM; diff --git a/lustre/liblustre/dir.c b/lustre/liblustre/dir.c index 1cadcf1..c7560e8 100644 --- a/lustre/liblustre/dir.c +++ b/lustre/liblustre/dir.c @@ -120,10 +120,10 @@ static int llu_dir_do_readpage(struct inode *inode, struct page *page) return rc; } -static cfs_page_t *llu_dir_read_page(struct inode *ino, __u64 hash, +static struct page *llu_dir_read_page(struct inode *ino, __u64 hash, int exact, struct ll_dir_chain *chain) { - cfs_page_t *page; + struct page *page; int rc; ENTRY; @@ -193,7 +193,7 @@ ssize_t llu_iop_filldirentries(struct inode *dir, _SYSIO_OFF_T *basep, struct intnl_stat *st = llu_i2stat(dir); loff_t pos = *basep; struct ll_dir_chain chain; - cfs_page_t *page; + struct page *page; int filled = 0; int rc; int done; diff --git a/lustre/liblustre/llite_cl.c b/lustre/liblustre/llite_cl.c index adc9dc9..630be49 100644 --- a/lustre/liblustre/llite_cl.c +++ b/lustre/liblustre/llite_cl.c @@ -58,7 +58,7 @@ static int slp_type_init (struct lu_device_type *t); static void slp_type_fini (struct lu_device_type *t); static int slp_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, cfs_page_t *vmpage); + struct cl_page *page, struct page *vmpage); static int slp_attr_get (const struct lu_env *env, struct cl_object *obj, struct cl_attr *attr); @@ -224,7 +224,7 @@ void slp_global_fini(void) */ static int slp_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, cfs_page_t *vmpage) + struct cl_page *page, struct page *vmpage) { struct ccc_page *cpg = cl_object_page_slice(obj, page); @@ -294,7 +294,7 @@ static int slp_attr_get(const struct lu_env *env, struct cl_object *obj, static void slp_page_fini_common(struct ccc_page *cp) { - cfs_page_t *vmpage = cp->cpg_page; + struct page *vmpage = cp->cpg_page; 
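/*
 * Aside on the ldlm_pool.c shrinker conversion above: set_shrinker() and
 * remove_shrinker() appear to be libcfs compatibility wrappers, since the
 * raw kernel API of this era registers a struct shrinker instead.  A
 * hedged sketch of that underlying form (demo_* names are illustrative):
 */
#include <linux/mm.h>

static int demo_shrink(struct shrinker *s, struct shrink_control *sc)
{
        return 0;               /* report no freeable objects */
}

static struct shrinker demo_shrinker = {
        .shrink = demo_shrink,
        .seeks  = DEFAULT_SEEKS,
};
/* paired as: register_shrinker(&demo_shrinker);
 *       ...  unregister_shrinker(&demo_shrinker); */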
LASSERT(vmpage != NULL); llu_free_user_page(vmpage); @@ -497,8 +497,8 @@ static int llu_queue_pio(const struct lu_env *env, struct cl_io *io, unsigned long index, offset, bytes; offset = (pos & ~CFS_PAGE_MASK); - index = pos >> CFS_PAGE_SHIFT; - bytes = CFS_PAGE_SIZE - offset; + index = pos >> PAGE_CACHE_SHIFT; + bytes = PAGE_CACHE_SIZE - offset; if (bytes > count) bytes = count; @@ -582,7 +582,8 @@ struct llu_io_group * get_io_group(struct inode *inode, int maxpages, static int max_io_pages(ssize_t len, int iovlen) { - return (((len + CFS_PAGE_SIZE -1) / CFS_PAGE_SIZE) + 2 + iovlen - 1); + return ((len + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE) + + 2 + iovlen - 1; } void put_io_group(struct llu_io_group *group) diff --git a/lustre/liblustre/llite_lib.h b/lustre/liblustre/llite_lib.h index b475fb6..2b427c3 100644 --- a/lustre/liblustre/llite_lib.h +++ b/lustre/liblustre/llite_lib.h @@ -65,7 +65,7 @@ /* This should not be "optimized" use ~0ULL because page->index is a long and * 32-bit systems are therefore limited to 16TB in a mapping */ -#define MAX_LFS_FILESIZE ((__u64)(~0UL) << CFS_PAGE_SHIFT) +#define MAX_LFS_FILESIZE ((__u64)(~0UL) << PAGE_CACHE_SHIFT) struct ll_file_data { struct obd_client_handle fd_mds_och; __u32 fd_flags; @@ -345,7 +345,7 @@ static inline void inode_init_lvb(struct inode *inode, struct ost_lvb *lvb) #define LLU_IO_GROUP_SIZE(x) \ (sizeof(struct llu_io_group) + \ (sizeof(struct ll_async_page) + \ - sizeof(cfs_page_t) + \ + sizeof(struct page) + \ llap_cookie_size) * (x)) struct llu_io_session { diff --git a/lustre/liblustre/super.c b/lustre/liblustre/super.c index 22edc5c..be0b4ca 100644 --- a/lustre/liblustre/super.c +++ b/lustre/liblustre/super.c @@ -1600,7 +1600,7 @@ static int llu_lov_dir_setstripe(struct inode *ino, unsigned long arg) LASSERT(sizeof(lum) == sizeof(*lump)); LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lump->lmm_objects[0])); - if (cfs_copy_from_user(&lum, lump, sizeof(lum))) + if (copy_from_user(&lum, lump, sizeof(lum))) return(-EFAULT); switch (lum.lmm_magic) { @@ -1712,7 +1712,7 @@ static int llu_lov_file_setstripe(struct inode *ino, unsigned long arg) LASSERT(sizeof(lum) == sizeof(*lump)); LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lump->lmm_objects[0])); - if (cfs_copy_from_user(&lum, lump, sizeof(lum))) + if (copy_from_user(&lum, lump, sizeof(lum))) RETURN(-EFAULT); rc = llu_lov_setstripe_ea_info(ino, flags, &lum, sizeof(lum)); diff --git a/lustre/liblustre/tests/sanity.c b/lustre/liblustre/tests/sanity.c index b5b595c..21deed3 100644 --- a/lustre/liblustre/tests/sanity.c +++ b/lustre/liblustre/tests/sanity.c @@ -900,14 +900,14 @@ static int pages_io(int xfer, loff_t pos) /* create sample data */ for (i = 0, buf = buf_alloc; i < _npages; i++) { - for (j = 0; j < CFS_PAGE_SIZE/sizeof(int); j++, buf++) { + for (j = 0; j < PAGE_CACHE_SIZE/sizeof(int); j++, buf++) { *buf = rand(); } } /* compute checksum */ for (i = 0, buf = buf_alloc; i < _npages; i++) { - for (j = 0; j < CFS_PAGE_SIZE/sizeof(int); j++, buf++) { + for (j = 0; j < PAGE_CACHE_SIZE/sizeof(int); j++, buf++) { check_sum[i] += *buf; } } @@ -925,9 +925,9 @@ static int pages_io(int xfer, loff_t pos) } gettimeofday(&tw1, NULL); for (i = 0, buf = buf_alloc; i < _npages; - i += xfer, buf += xfer * CFS_PAGE_SIZE / sizeof(int)) { - rc = write(fd, buf, CFS_PAGE_SIZE * xfer); - if (rc != CFS_PAGE_SIZE * xfer) { + i += xfer, buf += xfer * PAGE_CACHE_SIZE / sizeof(int)) { + rc = write(fd, buf, PAGE_CACHE_SIZE * xfer); + if (rc != PAGE_CACHE_SIZE * xfer) { printf("write error (i %d, rc %d): 
%s\n", i, rc, strerror(errno)); return(1); @@ -945,9 +945,9 @@ static int pages_io(int xfer, loff_t pos) } gettimeofday(&tr1, NULL); for (i = 0, buf = buf_alloc; i < _npages; - i += xfer, buf += xfer * CFS_PAGE_SIZE / sizeof(int)) { - rc = read(fd, buf, CFS_PAGE_SIZE * xfer); - if (rc != CFS_PAGE_SIZE * xfer) { + i += xfer, buf += xfer * PAGE_CACHE_SIZE / sizeof(int)) { + rc = read(fd, buf, PAGE_CACHE_SIZE * xfer); + if (rc != PAGE_CACHE_SIZE * xfer) { printf("read error (i %d, rc %d): %s\n", i, rc, strerror(errno)); return(1); @@ -958,7 +958,7 @@ static int pages_io(int xfer, loff_t pos) /* compute checksum */ for (i = 0, buf = buf_alloc; i < _npages; i++) { int sum = 0; - for (j = 0; j < CFS_PAGE_SIZE/sizeof(int); j++, buf++) { + for (j = 0; j < PAGE_CACHE_SIZE/sizeof(int); j++, buf++) { sum += *buf; } if (sum != check_sum[i]) { @@ -973,8 +973,8 @@ static int pages_io(int xfer, loff_t pos) tw = (tw2.tv_sec - tw1.tv_sec) * 1000000 + (tw2.tv_usec - tw1.tv_usec); tr = (tr2.tv_sec - tr1.tv_sec) * 1000000 + (tr2.tv_usec - tr1.tv_usec); printf(" (R:%.3fM/s, W:%.3fM/s)\n", - (_npages * CFS_PAGE_SIZE) / (tw / 1000000.0) / (1024 * 1024), - (_npages * CFS_PAGE_SIZE) / (tr / 1000000.0) / (1024 * 1024)); + (_npages * PAGE_CACHE_SIZE) / (tw / 1000000.0) / (1024 * 1024), + (_npages * PAGE_CACHE_SIZE) / (tr / 1000000.0) / (1024 * 1024)); if (data_error) return 1; @@ -1507,7 +1507,7 @@ int main(int argc, char * const argv[]) __liblustre_setup_(); - buf_size = _npages * CFS_PAGE_SIZE; + buf_size = _npages * PAGE_CACHE_SIZE; if (opt_verbose) printf("allocating %d bytes buffer\n", buf_size); buf_alloc = calloc(1, buf_size); diff --git a/lustre/llite/dir.c b/lustre/llite/dir.c index def2967..9365383 100644 --- a/lustre/llite/dir.c +++ b/lustre/llite/dir.c @@ -134,7 +134,7 @@ * a header lu_dirpage which describes the start/end hash, and whether this * page is empty (contains no dir entry) or hash collide with next page. * After client receives reply, several pages will be integrated into dir page - * in CFS_PAGE_SIZE (if CFS_PAGE_SIZE greater than LU_PAGE_SIZE), and the + * in PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE greater than LU_PAGE_SIZE), and the * lu_dirpage for this integrated page will be adjusted. See * lmv_adjust_dirpages(). 
* @@ -156,7 +156,7 @@ static int ll_dir_filler(void *_hash, struct page *page0) struct pagevec lru_pvec; #endif struct lu_dirpage *dp; - int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> CFS_PAGE_SHIFT; + int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_CACHE_SHIFT; int nrdpgs = 0; /* number of pages read actually */ int npages; int i; @@ -196,8 +196,8 @@ static int ll_dir_filler(void *_hash, struct page *page0) if (body->valid & OBD_MD_FLSIZE) cl_isize_write(inode, body->size); - nrdpgs = (request->rq_bulk->bd_nob_transferred+CFS_PAGE_SIZE-1) - >> CFS_PAGE_SHIFT; + nrdpgs = (request->rq_bulk->bd_nob_transferred + + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; SetPageUptodate(page0); } unlock_page(page0); @@ -219,9 +219,9 @@ static int ll_dir_filler(void *_hash, struct page *page0) SetPageUptodate(page); - dp = cfs_kmap(page); - hash = le64_to_cpu(dp->ldp_hash_start); - cfs_kunmap(page); + dp = kmap(page); + hash = le64_to_cpu(dp->ldp_hash_start); + kunmap(page); offset = hash_x_index(hash, hash64); @@ -299,7 +299,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash, */ wait_on_page(page); if (PageUptodate(page)) { - dp = cfs_kmap(page); + dp = kmap(page); if (BITS_PER_LONG == 32 && hash64) { *start = le64_to_cpu(dp->ldp_hash_start) >> 32; *end = le64_to_cpu(dp->ldp_hash_end) >> 32; @@ -1080,7 +1080,7 @@ static int copy_and_ioctl(int cmd, struct obd_export *exp, void *data, int len) OBD_ALLOC(ptr, len); if (ptr == NULL) return -ENOMEM; - if (cfs_copy_from_user(ptr, data, len)) { + if (copy_from_user(ptr, data, len)) { OBD_FREE(ptr, len); return -EFAULT; } @@ -1385,11 +1385,11 @@ lmv_out_free: LASSERT(sizeof(lumv3.lmm_objects[0]) == sizeof(lumv3p->lmm_objects[0])); /* first try with v1 which is smaller than v3 */ - if (cfs_copy_from_user(lumv1, lumv1p, sizeof(*lumv1))) + if (copy_from_user(lumv1, lumv1p, sizeof(*lumv1))) RETURN(-EFAULT); if ((lumv1->lmm_magic == LOV_USER_MAGIC_V3) ) { - if (cfs_copy_from_user(&lumv3, lumv3p, sizeof(lumv3))) + if (copy_from_user(&lumv3, lumv3p, sizeof(lumv3))) RETURN(-EFAULT); } @@ -1516,8 +1516,8 @@ out_rmdir: lmdp = (struct lov_user_mds_data *)arg; lump = &lmdp->lmd_lmm; } - if (cfs_copy_to_user(lump, lmm, lmmsize)) { - if (cfs_copy_to_user(lump, lmm, sizeof(*lump))) + if (copy_to_user(lump, lmm, lmmsize)) { + if (copy_to_user(lump, lmm, sizeof(*lump))) GOTO(out_req, rc = -EFAULT); rc = -EOVERFLOW; } @@ -1533,7 +1533,7 @@ out_rmdir: st.st_gid = body->gid; st.st_rdev = body->rdev; st.st_size = body->size; - st.st_blksize = CFS_PAGE_SIZE; + st.st_blksize = PAGE_CACHE_SIZE; st.st_blocks = body->blocks; st.st_atime = body->atime; st.st_mtime = body->mtime; @@ -1541,7 +1541,7 @@ out_rmdir: st.st_ino = inode->i_ino; lmdp = (struct lov_user_mds_data *)arg; - if (cfs_copy_to_user(&lmdp->lmd_st, &st, sizeof(st))) + if (copy_to_user(&lmdp->lmd_st, &st, sizeof(st))) GOTO(out_req, rc = -EFAULT); } @@ -1571,7 +1571,7 @@ out_rmdir: if (lmm == NULL) RETURN(-ENOMEM); - if (cfs_copy_from_user(lmm, lum, lmmsize)) + if (copy_from_user(lmm, lum, lmmsize)) GOTO(free_lmm, rc = -EFAULT); switch (lmm->lmm_magic) { @@ -1608,7 +1608,7 @@ out_rmdir: if (rc) GOTO(free_lsm, rc); - if (cfs_copy_to_user(&lumd->lmd_st, &st, sizeof(st))) + if (copy_to_user(&lumd->lmd_st, &st, sizeof(st))) GOTO(free_lsm, rc = -EFAULT); EXIT; @@ -1661,9 +1661,9 @@ out_rmdir: NULL); if (rc) { CDEBUG(D_QUOTA, "mdc ioctl %d failed: %d\n", cmd, rc); - if (cfs_copy_to_user((void *)arg, check, + if (copy_to_user((void *)arg, check, sizeof(*check))) - CDEBUG(D_QUOTA, "cfs_copy_to_user 
failed\n"); + CDEBUG(D_QUOTA, "copy_to_user failed\n"); GOTO(out_poll, rc); } @@ -1671,9 +1671,9 @@ out_rmdir: NULL); if (rc) { CDEBUG(D_QUOTA, "osc ioctl %d failed: %d\n", cmd, rc); - if (cfs_copy_to_user((void *)arg, check, + if (copy_to_user((void *)arg, check, sizeof(*check))) - CDEBUG(D_QUOTA, "cfs_copy_to_user failed\n"); + CDEBUG(D_QUOTA, "copy_to_user failed\n"); GOTO(out_poll, rc); } out_poll: @@ -1695,7 +1695,7 @@ out_rmdir: if (!qctl_20) GOTO(out_quotactl_18, rc = -ENOMEM); - if (cfs_copy_from_user(qctl_18, (void *)arg, sizeof(*qctl_18))) + if (copy_from_user(qctl_18, (void *)arg, sizeof(*qctl_18))) GOTO(out_quotactl_20, rc = -ENOMEM); QCTL_COPY(qctl_20, qctl_18); @@ -1720,7 +1720,7 @@ out_rmdir: QCTL_COPY(qctl_18, qctl_20); qctl_18->obd_uuid = qctl_20->obd_uuid; - if (cfs_copy_to_user((void *)arg, qctl_18, + if (copy_to_user((void *)arg, qctl_18, sizeof(*qctl_18))) rc = -EFAULT; } @@ -1741,12 +1741,12 @@ out_rmdir: if (!qctl) RETURN(-ENOMEM); - if (cfs_copy_from_user(qctl, (void *)arg, sizeof(*qctl))) + if (copy_from_user(qctl, (void *)arg, sizeof(*qctl))) GOTO(out_quotactl, rc = -EFAULT); rc = quotactl_ioctl(sbi, qctl); - if (rc == 0 && cfs_copy_to_user((void *)arg,qctl,sizeof(*qctl))) + if (rc == 0 && copy_to_user((void *)arg, qctl, sizeof(*qctl))) rc = -EFAULT; out_quotactl: @@ -1777,7 +1777,7 @@ out_rmdir: int count, vallen; struct obd_export *exp; - if (cfs_copy_from_user(&count, (int *)arg, sizeof(int))) + if (copy_from_user(&count, (int *)arg, sizeof(int))) RETURN(-EFAULT); /* get ost count when count is zero, get mdt count otherwise */ @@ -1790,13 +1790,13 @@ out_rmdir: RETURN(rc); } - if (cfs_copy_to_user((int *)arg, &count, sizeof(int))) + if (copy_to_user((int *)arg, &count, sizeof(int))) RETURN(-EFAULT); RETURN(0); } case LL_IOC_PATH2FID: - if (cfs_copy_to_user((void *)arg, ll_inode2fid(inode), + if (copy_to_user((void *)arg, ll_inode2fid(inode), sizeof(struct lu_fid))) RETURN(-EFAULT); RETURN(0); @@ -1819,7 +1819,7 @@ out_rmdir: RETURN(-ENOMEM); /* We don't know the true size yet; copy the fixed-size part */ - if (cfs_copy_from_user(hur, (void *)arg, sizeof(*hur))) { + if (copy_from_user(hur, (void *)arg, sizeof(*hur))) { OBD_FREE_PTR(hur); RETURN(-EFAULT); } @@ -1832,7 +1832,7 @@ out_rmdir: RETURN(-ENOMEM); /* Copy the whole struct */ - if (cfs_copy_from_user(hur, (void *)arg, totalsize)) { + if (copy_from_user(hur, (void *)arg, totalsize)) { OBD_FREE_LARGE(hur, totalsize); RETURN(-EFAULT); } @@ -1848,7 +1848,7 @@ out_rmdir: struct hsm_progress_kernel hpk; struct hsm_progress hp; - if (cfs_copy_from_user(&hp, (void *)arg, sizeof(hp))) + if (copy_from_user(&hp, (void *)arg, sizeof(hp))) RETURN(-EFAULT); hpk.hpk_fid = hp.hp_fid; @@ -1876,13 +1876,13 @@ out_rmdir: OBD_ALLOC_PTR(copy); if (copy == NULL) RETURN(-ENOMEM); - if (cfs_copy_from_user(copy, (char *)arg, sizeof(*copy))) { + if (copy_from_user(copy, (char *)arg, sizeof(*copy))) { OBD_FREE_PTR(copy); RETURN(-EFAULT); } rc = ll_ioc_copy_start(inode->i_sb, copy); - if (cfs_copy_to_user((char *)arg, copy, sizeof(*copy))) + if (copy_to_user((char *)arg, copy, sizeof(*copy))) rc = -EFAULT; OBD_FREE_PTR(copy); @@ -1895,13 +1895,13 @@ out_rmdir: OBD_ALLOC_PTR(copy); if (copy == NULL) RETURN(-ENOMEM); - if (cfs_copy_from_user(copy, (char *)arg, sizeof(*copy))) { + if (copy_from_user(copy, (char *)arg, sizeof(*copy))) { OBD_FREE_PTR(copy); RETURN(-EFAULT); } rc = ll_ioc_copy_end(inode->i_sb, copy); - if (cfs_copy_to_user((char *)arg, copy, sizeof(*copy))) + if (copy_to_user((char *)arg, copy, sizeof(*copy))) rc = 
-EFAULT; OBD_FREE_PTR(copy); diff --git a/lustre/llite/file.c b/lustre/llite/file.c index cc1697e..8777ba6 100644 --- a/lustre/llite/file.c +++ b/lustre/llite/file.c @@ -54,7 +54,7 @@ struct ll_file_data *ll_file_data_get(void) { struct ll_file_data *fd; - OBD_SLAB_ALLOC_PTR_GFP(fd, ll_file_data_slab, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(fd, ll_file_data_slab, __GFP_IO); if (fd == NULL) return NULL; @@ -2074,7 +2074,7 @@ long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct file *file2; struct lustre_swap_layouts lsl; - if (cfs_copy_from_user(&lsl, (char *)arg, + if (copy_from_user(&lsl, (char *)arg, sizeof(struct lustre_swap_layouts))) RETURN(-EFAULT); @@ -2241,7 +2241,7 @@ long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data), op_data, NULL); - if (cfs_copy_to_user((char *)arg, hca, sizeof(*hca))) + if (copy_to_user((char *)arg, hca, sizeof(*hca))) rc = -EFAULT; ll_finish_md_op_data(op_data); diff --git a/lustre/llite/llite_internal.h b/lustre/llite/llite_internal.h index daaa371..05100c2 100644 --- a/lustre/llite/llite_internal.h +++ b/lustre/llite/llite_internal.h @@ -296,10 +296,10 @@ static inline struct ll_inode_info *ll_i2info(struct inode *inode) /* default to about 40meg of readahead on a given system. That much tied * up in 512k readahead requests serviced at 40ms each is about 1GB/s. */ -#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - CFS_PAGE_SHIFT)) +#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_CACHE_SHIFT)) /* default to read-ahead full files smaller than 2MB on the second read */ -#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - CFS_PAGE_SHIFT)) +#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT)) enum ra_stat { RA_STAT_HIT = 0, @@ -609,7 +609,7 @@ struct ll_readahead_state { unsigned long ras_consecutive_stride_requests; }; -extern cfs_mem_cache_t *ll_file_data_slab; +extern struct kmem_cache *ll_file_data_slab; struct lustre_handle; struct ll_file_data { struct ll_readahead_state fd_ras; @@ -655,7 +655,7 @@ static inline int ll_need_32bit_api(struct ll_sb_info *sbi) #define LLAP_MAGIC 98764321 -extern cfs_mem_cache_t *ll_async_page_slab; +extern struct kmem_cache *ll_async_page_slab; extern size_t ll_async_page_slab_size; void ll_ra_read_in(struct file *f, struct ll_ra_read *rar); @@ -857,7 +857,7 @@ int ll_show_options(struct seq_file *seq, struct dentry *dentry); #else int ll_show_options(struct seq_file *seq, struct vfsmount *vfs); #endif -void ll_dirty_page_discard_warn(cfs_page_t *page, int ioret); +void ll_dirty_page_discard_warn(struct page *page, int ioret); int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req, struct super_block *, struct lookup_intent *); void lustre_dump_dentry(struct dentry *, int recur); @@ -940,7 +940,7 @@ struct vvp_io { /** * locked page returned from vvp_io */ - cfs_page_t *ft_vmpage; + struct page *ft_vmpage; #ifndef HAVE_VM_OP_FAULT struct vm_nopage_api { /** @@ -1099,7 +1099,7 @@ static inline void ll_invalidate_page(struct page *vmpage) if (mapping == NULL) return; - ll_teardown_mmaps(mapping, offset, offset + CFS_PAGE_SIZE); + ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE); truncate_complete_page(mapping, vmpage); } @@ -1170,8 +1170,8 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size); int ll_removexattr(struct dentry *dentry, const char *name); /* llite/remote_perm.c */ -extern cfs_mem_cache_t *ll_remote_perm_cachep; -extern 
cfs_mem_cache_t *ll_rmtperm_hash_cachep; +extern struct kmem_cache *ll_remote_perm_cachep; +extern struct kmem_cache *ll_rmtperm_hash_cachep; cfs_hlist_head_t *alloc_rmtperm_hash(void); void free_rmtperm_hash(cfs_hlist_head_t *hash); diff --git a/lustre/llite/llite_lib.c b/lustre/llite/llite_lib.c index f84271f..99f8fd6 100644 --- a/lustre/llite/llite_lib.c +++ b/lustre/llite/llite_lib.c @@ -56,7 +56,7 @@ #include #include "llite_internal.h" -cfs_mem_cache_t *ll_file_data_slab; +struct kmem_cache *ll_file_data_slab; CFS_LIST_HEAD(ll_super_blocks); DEFINE_SPINLOCK(ll_sb_lock); @@ -93,7 +93,7 @@ static struct ll_sb_info *ll_init_sbi(void) si_meminfo(&si); pages = si.totalram - si.totalhigh; - if (pages >> (20 - CFS_PAGE_SHIFT) < 512) { + if (pages >> (20 - PAGE_CACHE_SHIFT) < 512) { lru_page_max = pages / 2; } else { lru_page_max = (pages / 4) * 3; @@ -309,15 +309,15 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, valid != CLIENT_CONNECT_MDT_REQD) { char *buf; - OBD_ALLOC_WAIT(buf, CFS_PAGE_SIZE); - obd_connect_flags2str(buf, CFS_PAGE_SIZE, + OBD_ALLOC_WAIT(buf, PAGE_CACHE_SIZE); + obd_connect_flags2str(buf, PAGE_CACHE_SIZE, valid ^ CLIENT_CONNECT_MDT_REQD, ","); LCONSOLE_ERROR_MSG(0x170, "Server %s does not support " "feature(s) needed for correct operation " "of this client (%s). Please upgrade " "server or downgrade client.\n", sbi->ll_md_exp->exp_obd->obd_name, buf); - OBD_FREE(buf, CFS_PAGE_SIZE); + OBD_FREE(buf, PAGE_CACHE_SIZE); GOTO(out_md_fid, err = -EPROTO); } @@ -387,7 +387,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) sbi->ll_md_brw_size = data->ocd_brw_size; else - sbi->ll_md_brw_size = CFS_PAGE_SIZE; + sbi->ll_md_brw_size = PAGE_CACHE_SIZE; if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) { LCONSOLE_INFO("Layout lock feature supported.\n"); @@ -2426,11 +2426,11 @@ int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg) if (!obd) RETURN(-ENOENT); - if (cfs_copy_to_user((void *)arg, obd->obd_name, - strlen(obd->obd_name) + 1)) - RETURN(-EFAULT); + if (copy_to_user((void *)arg, obd->obd_name, + strlen(obd->obd_name) + 1)) + RETURN(-EFAULT); - RETURN(0); + RETURN(0); } /** @@ -2485,7 +2485,7 @@ static char* ll_d_path(struct dentry *dentry, char *buf, int bufsize) return path; } -void ll_dirty_page_discard_warn(cfs_page_t *page, int ioret) +void ll_dirty_page_discard_warn(struct page *page, int ioret) { char *buf, *path = NULL; struct dentry *dentry = NULL; diff --git a/lustre/llite/llite_mmap.c b/lustre/llite/llite_mmap.c index febad1d..aa26615 100644 --- a/lustre/llite/llite_mmap.c +++ b/lustre/llite/llite_mmap.c @@ -65,7 +65,7 @@ void policy_from_vma(ldlm_policy_data_t *policy, size_t count) { policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) + - (vma->vm_pgoff << CFS_PAGE_SHIFT); + (vma->vm_pgoff << PAGE_CACHE_SHIFT); policy->l_extent.end = (policy->l_extent.start + count - 1) | ~CFS_PAGE_MASK; } @@ -576,7 +576,8 @@ static int ll_populate(struct vm_area_struct *area, unsigned long address, /* return the user space pointer that maps to a file offset via a vma */ static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte) { - return vma->vm_start + (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT)); + return vma->vm_start + + (byte - ((__u64)vma->vm_pgoff << PAGE_CACHE_SHIFT)); } @@ -590,7 +591,7 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last) LASSERTF(last > 
first, "last "LPU64" first "LPU64"\n", last, first); if (mapping_mapped(mapping)) { rc = 0; - unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1, + unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1, last - first + 1, 0); } diff --git a/lustre/llite/lloop.c b/lustre/llite/lloop.c index c494dd2..7a5e000 100644 --- a/lustre/llite/lloop.c +++ b/lustre/llite/lloop.c @@ -224,7 +224,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head) offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset; bio_for_each_segment(bvec, bio, i) { BUG_ON(bvec->bv_offset != 0); - BUG_ON(bvec->bv_len != CFS_PAGE_SIZE); + BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE); pages[page_count] = bvec->bv_page; offsets[page_count] = offset; @@ -518,7 +518,7 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused, set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); - lo->lo_blocksize = CFS_PAGE_SIZE; + lo->lo_blocksize = PAGE_CACHE_SIZE; lo->lo_device = bdev; lo->lo_flags = lo_flags; lo->lo_backing_file = file; @@ -539,13 +539,13 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused, lo->lo_queue->unplug_fn = loop_unplug; #endif - /* queue parameters */ - CLASSERT(CFS_PAGE_SIZE < (1 << (sizeof(unsigned short) * 8))); - blk_queue_logical_block_size(lo->lo_queue, - (unsigned short)CFS_PAGE_SIZE); - blk_queue_max_hw_sectors(lo->lo_queue, - LLOOP_MAX_SEGMENTS << (CFS_PAGE_SHIFT - 9)); - blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS); + /* queue parameters */ + CLASSERT(PAGE_CACHE_SIZE < (1 << (sizeof(unsigned short) * 8))); + blk_queue_logical_block_size(lo->lo_queue, + (unsigned short)PAGE_CACHE_SIZE); + blk_queue_max_hw_sectors(lo->lo_queue, + LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9)); + blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS); set_capacity(disks[lo->lo_number], size); bd_set_size(bdev, size << 9); diff --git a/lustre/llite/lproc_llite.c b/lustre/llite/lproc_llite.c index d63745d..90dbfef 100644 --- a/lustre/llite/lproc_llite.c +++ b/lustre/llite/lproc_llite.c @@ -248,22 +248,23 @@ static int ll_rd_max_readahead_mb(char *page, char **start, off_t off, } static int ll_wr_max_readahead_mb(struct file *file, const char *buffer, - unsigned long count, void *data) + unsigned long count, void *data) { - struct super_block *sb = data; - struct ll_sb_info *sbi = ll_s2sbi(sb); - int mult, rc, pages_number; + struct super_block *sb = data; + struct ll_sb_info *sbi = ll_s2sbi(sb); + int mult, rc, pages_number; - mult = 1 << (20 - CFS_PAGE_SHIFT); - rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult); - if (rc) - return rc; + mult = 1 << (20 - PAGE_CACHE_SHIFT); + rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult); + if (rc) + return rc; - if (pages_number < 0 || pages_number > cfs_num_physpages / 2) { - CERROR("can't set file readahead more than %lu MB\n", - cfs_num_physpages >> (20 - CFS_PAGE_SHIFT + 1)); /*1/2 of RAM*/ - return -ERANGE; - } + if (pages_number < 0 || pages_number > num_physpages / 2) { + /* 1/2 of RAM */ + CERROR("can't set file readahead more than %lu MB\n", + num_physpages >> (20 - PAGE_CACHE_SHIFT + 1)); + return -ERANGE; + } spin_lock(&sbi->ll_lock); sbi->ll_ra_info.ra_max_pages = pages_number; @@ -284,7 +285,7 @@ static int ll_rd_max_readahead_per_file_mb(char *page, char **start, off_t off, pages_number = sbi->ll_ra_info.ra_max_pages_per_file; spin_unlock(&sbi->ll_lock); - mult = 1 << (20 - CFS_PAGE_SHIFT); + mult = 1 << (20 - PAGE_CACHE_SHIFT); return lprocfs_read_frac_helper(page, count, 
pages_number, mult); } @@ -295,7 +296,7 @@ static int ll_wr_max_readahead_per_file_mb(struct file *file, const char *buffer struct ll_sb_info *sbi = ll_s2sbi(sb); int mult, rc, pages_number; - mult = 1 << (20 - CFS_PAGE_SHIFT); + mult = 1 << (20 - PAGE_CACHE_SHIFT); rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult); if (rc) return rc; @@ -327,31 +328,32 @@ static int ll_rd_max_read_ahead_whole_mb(char *page, char **start, off_t off, pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages; spin_unlock(&sbi->ll_lock); - mult = 1 << (20 - CFS_PAGE_SHIFT); + mult = 1 << (20 - PAGE_CACHE_SHIFT); return lprocfs_read_frac_helper(page, count, pages_number, mult); } static int ll_wr_max_read_ahead_whole_mb(struct file *file, const char *buffer, - unsigned long count, void *data) + unsigned long count, void *data) { - struct super_block *sb = data; - struct ll_sb_info *sbi = ll_s2sbi(sb); - int mult, rc, pages_number; - - mult = 1 << (20 - CFS_PAGE_SHIFT); - rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult); - if (rc) - return rc; + struct super_block *sb = data; + struct ll_sb_info *sbi = ll_s2sbi(sb); + int mult, rc, pages_number; - /* Cap this at the current max readahead window size, the readahead - * algorithm does this anyway so it's pointless to set it larger. */ - if (pages_number < 0 || - pages_number > sbi->ll_ra_info.ra_max_pages_per_file) { - CERROR("can't set max_read_ahead_whole_mb more than " - "max_read_ahead_per_file_mb: %lu\n", - sbi->ll_ra_info.ra_max_pages_per_file >> (20 - CFS_PAGE_SHIFT)); - return -ERANGE; - } + mult = 1 << (20 - PAGE_CACHE_SHIFT); + rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult); + if (rc) + return rc; + + /* Cap this at the current max readahead window size, the readahead + * algorithm does this anyway so it's pointless to set it larger. 
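Each of these lproc handlers scales between megabytes and cache pages with the same factor: a megabyte is 1 << 20 bytes, so with 1 << PAGE_CACHE_SHIFT bytes per page the multiplier is 1 << (20 - PAGE_CACHE_SHIFT). A standalone sketch of both directions, assuming a 4KB page (PAGE_CACHE_SHIFT stubbed to 12) and hypothetical helper names mb_to_pages()/pages_to_mb():

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12	/* stand-in; 4KB pages */

/* pages per MB, the "mult" computed by the handlers above */
static const unsigned long mult = 1UL << (20 - PAGE_CACHE_SHIFT);

static unsigned long mb_to_pages(unsigned long mb)
{
	return mb * mult;	/* write side, via lprocfs_write_frac_helper() */
}

static unsigned long pages_to_mb(unsigned long pages)
{
	return pages >> (20 - PAGE_CACHE_SHIFT);	/* read side */
}

int main(void)
{
	printf("%lu\n", mb_to_pages(40));	/* 10240 pages for 40MB */
	printf("%lu\n", pages_to_mb(10240));	/* 40 */
	return 0;
}

The same arithmetic explains the earlier "can't set file readahead more than %lu MB" message shifting by (20 - PAGE_CACHE_SHIFT + 1): the extra bit halves the page count before converting to MB, giving the half-of-RAM cap.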
*/ + if (pages_number < 0 || + pages_number > sbi->ll_ra_info.ra_max_pages_per_file) { + CERROR("can't set max_read_ahead_whole_mb more than " + "max_read_ahead_per_file_mb: %lu\n", + sbi->ll_ra_info.ra_max_pages_per_file >> + (20 - PAGE_CACHE_SHIFT)); + return -ERANGE; + } spin_lock(&sbi->ll_lock); sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number; @@ -366,7 +368,7 @@ static int ll_rd_max_cached_mb(char *page, char **start, off_t off, struct super_block *sb = data; struct ll_sb_info *sbi = ll_s2sbi(sb); struct cl_client_cache *cache = &sbi->ll_cache; - int shift = 20 - CFS_PAGE_SHIFT; + int shift = 20 - PAGE_CACHE_SHIFT; int max_cached_mb; int unused_mb; @@ -397,16 +399,16 @@ static int ll_wr_max_cached_mb(struct file *file, const char *buffer, int nrpages = 0; ENTRY; - mult = 1 << (20 - CFS_PAGE_SHIFT); + mult = 1 << (20 - PAGE_CACHE_SHIFT); buffer = lprocfs_find_named_value(buffer, "max_cached_mb:", &count); rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult); if (rc) RETURN(rc); - if (pages_number < 0 || pages_number > cfs_num_physpages) { + if (pages_number < 0 || pages_number > num_physpages) { CERROR("%s: can't set max cache more than %lu MB\n", ll_get_fsname(sb, NULL, 0), - cfs_num_physpages >> (20 - CFS_PAGE_SHIFT)); + num_physpages >> (20 - PAGE_CACHE_SHIFT)); RETURN(-ERANGE); } diff --git a/lustre/llite/remote_perm.c b/lustre/llite/remote_perm.c index 4d215a7..c9faae6 100644 --- a/lustre/llite/remote_perm.c +++ b/lustre/llite/remote_perm.c @@ -55,14 +55,14 @@ #include #include "llite_internal.h" -cfs_mem_cache_t *ll_remote_perm_cachep = NULL; -cfs_mem_cache_t *ll_rmtperm_hash_cachep = NULL; +struct kmem_cache *ll_remote_perm_cachep; +struct kmem_cache *ll_rmtperm_hash_cachep; static inline struct ll_remote_perm *alloc_ll_remote_perm(void) { struct ll_remote_perm *lrp; - OBD_SLAB_ALLOC_PTR_GFP(lrp, ll_remote_perm_cachep, CFS_ALLOC_KERNEL); + OBD_SLAB_ALLOC_PTR_GFP(lrp, ll_remote_perm_cachep, GFP_KERNEL); if (lrp) CFS_INIT_HLIST_NODE(&lrp->lrp_list); return lrp; @@ -85,7 +85,7 @@ cfs_hlist_head_t *alloc_rmtperm_hash(void) OBD_SLAB_ALLOC_GFP(hash, ll_rmtperm_hash_cachep, REMOTE_PERM_HASHSIZE * sizeof(*hash), - CFS_ALLOC_STD); + GFP_IOFS); if (!hash) return NULL; diff --git a/lustre/llite/rw.c b/lustre/llite/rw.c index 5b32414..b6eb577 100644 --- a/lustre/llite/rw.c +++ b/lustre/llite/rw.c @@ -154,10 +154,11 @@ static struct ll_cl_context *ll_cl_init(struct file *file, */ io->ci_lockreq = CILR_NEVER; - pos = (vmpage->index << CFS_PAGE_SHIFT); + pos = (vmpage->index << PAGE_CACHE_SHIFT); - /* Create a temp IO to serve write. */ - result = cl_io_rw_init(env, io, CIT_WRITE, pos, CFS_PAGE_SIZE); + /* Create a temp IO to serve write. */ + result = cl_io_rw_init(env, io, CIT_WRITE, + pos, PAGE_CACHE_SIZE); if (result == 0) { cio->cui_fd = LUSTRE_FPRIVATE(file); cio->cui_iov = NULL; @@ -567,12 +568,12 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io, * sense to tune the i_blkbits value for the file based on the OSTs it is * striped over, rather than having a constant value for all files here. */ -/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - CFS_PAGE_SHIFT)). +/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)). * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled * by default, this should be adjusted to correspond with max_read_ahead_mb * and max_read_ahead_per_file_mb; otherwise the readahead budget can be used * up quickly, which will affect read performance significantly.

See LU-2816 */ -#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> CFS_PAGE_SHIFT) +#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT) static inline int stride_io_mode(struct ll_readahead_state *ras) { @@ -780,7 +781,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io, end = rpc_boundary; /* Truncate RA window to end of file */ - end = min(end, (unsigned long)((kms - 1) >> CFS_PAGE_SHIFT)); + end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT)); ras->ras_next_readahead = max(end, end + 1); RAS_CDEBUG(ras); @@ -818,7 +819,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io, if (reserved != 0) ll_ra_count_put(ll_i2sbi(inode), reserved); - if (ra_end == end + 1 && ra_end == (kms >> CFS_PAGE_SHIFT)) + if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT)) ll_ra_stats_inc(mapping, RA_STAT_EOF); /* if we didn't get to the end of the region we reserved from @@ -1030,8 +1031,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, if (ras->ras_requests == 2 && !ras->ras_request_index) { __u64 kms_pages; - kms_pages = (i_size_read(inode) + CFS_PAGE_SIZE - 1) >> - CFS_PAGE_SHIFT; + kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT; CDEBUG(D_READA, "kmsp "LPU64" mwp %lu mp %lu\n", kms_pages, ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file); @@ -1212,7 +1213,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc) * breaking kernel which assumes ->writepage should mark * PageWriteback or clean the page. */ result = cl_sync_file_range(inode, offset, - offset + CFS_PAGE_SIZE - 1, + offset + PAGE_CACHE_SIZE - 1, CL_FSYNC_LOCAL, 1); if (result > 0) { /* actually we may have written more than one page. @@ -1250,7 +1251,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc) ENTRY; if (wbc->range_cyclic) { - start = mapping->writeback_index << CFS_PAGE_SHIFT; + start = mapping->writeback_index << PAGE_CACHE_SHIFT; end = OBD_OBJECT_EOF; } else { start = wbc->range_start; @@ -1279,7 +1280,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc) if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) { if (end == OBD_OBJECT_EOF) end = i_size_read(inode); - mapping->writeback_index = (end >> CFS_PAGE_SHIFT) + 1; + mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1; } RETURN(result); } diff --git a/lustre/llite/rw26.c b/lustre/llite/rw26.c index 4214b94..2d10dd5 100644 --- a/lustre/llite/rw26.c +++ b/lustre/llite/rw26.c @@ -203,8 +203,9 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr, return -EFBIG; } - *max_pages = (user_addr + size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT; - *max_pages -= user_addr >> CFS_PAGE_SHIFT; + *max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT; + *max_pages -= user_addr >> PAGE_CACHE_SHIFT; OBD_ALLOC_LARGE(*pages, *max_pages * sizeof(**pages)); if (*pages) { @@ -281,9 +282,9 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io, /* check the page type: if the page is a host page, then do * write directly */ if (clp->cp_type == CPT_CACHEABLE) { - cfs_page_t *vmpage = cl_page_vmpage(env, clp); - cfs_page_t *src_page; - cfs_page_t *dst_page; + struct page *vmpage = cl_page_vmpage(env, clp); + struct page *src_page; + struct page *dst_page; void *src; void *dst; @@ -370,7 +371,7 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io, * representing PAGE_SIZE worth of user data, into a single buffer, 
and * then truncate this to be a full-sized RPC. For 4kB PAGE_SIZE this is * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */ -#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * CFS_PAGE_SIZE) & \ +#define MAX_DIO_SIZE ((MAX_MALLOC / sizeof(struct brw_page) * PAGE_CACHE_SIZE) & \ ~(DT_MAX_BRW_SIZE - 1)) static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t file_offset, @@ -399,8 +400,8 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb, CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), size=%lu (max %lu), " "offset=%lld=%llx, pages %lu (max %lu)\n", inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE, - file_offset, file_offset, count >> CFS_PAGE_SHIFT, - MAX_DIO_SIZE >> CFS_PAGE_SHIFT); + file_offset, file_offset, count >> PAGE_CACHE_SHIFT, + MAX_DIO_SIZE >> PAGE_CACHE_SHIFT); /* Check that all user buffers are aligned as well */ for (seg = 0; seg < nr_segs; seg++) { @@ -443,7 +444,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb, &pages, &max_pages); if (likely(page_count > 0)) { if (unlikely(page_count < max_pages)) - bytes = page_count << CFS_PAGE_SHIFT; + bytes = page_count << PAGE_CACHE_SHIFT; result = ll_direct_IO_26_seg(env, io, rw, inode, file->f_mapping, bytes, file_offset, @@ -461,8 +462,8 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb, * We should always be able to kmalloc for a * page worth of page pointers = 4MB on i386. */ if (result == -ENOMEM && - size > (CFS_PAGE_SIZE / sizeof(*pages)) * - CFS_PAGE_SIZE) { + size > (PAGE_CACHE_SIZE / sizeof(*pages)) * + PAGE_CACHE_SIZE) { size = ((((size / 2) - 1) | ~CFS_PAGE_MASK) + 1) & CFS_PAGE_MASK; diff --git a/lustre/llite/super25.c b/lustre/llite/super25.c index 90949ff..6567d07 100644 --- a/lustre/llite/super25.c +++ b/lustre/llite/super25.c @@ -47,18 +47,18 @@ #include #include "llite_internal.h" -static cfs_mem_cache_t *ll_inode_cachep; +static struct kmem_cache *ll_inode_cachep; static struct inode *ll_alloc_inode(struct super_block *sb) { - struct ll_inode_info *lli; - ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_ALLOC_INODE, 1); - OBD_SLAB_ALLOC_PTR_GFP(lli, ll_inode_cachep, CFS_ALLOC_IO); - if (lli == NULL) - return NULL; - - inode_init_once(&lli->lli_vfs_inode); - return &lli->lli_vfs_inode; + struct ll_inode_info *lli; + ll_stats_ops_tally(ll_s2sbi(sb), LPROC_LL_ALLOC_INODE, 1); + OBD_SLAB_ALLOC_PTR_GFP(lli, ll_inode_cachep, __GFP_IO); + if (lli == NULL) + return NULL; + + inode_init_once(&lli->lli_vfs_inode); + return &lli->lli_vfs_inode; } static void ll_destroy_inode(struct inode *inode) @@ -69,20 +69,17 @@ static void ll_destroy_inode(struct inode *inode) int ll_init_inodecache(void) { - ll_inode_cachep = cfs_mem_cache_create("lustre_inode_cache", - sizeof(struct ll_inode_info), - 0, CFS_SLAB_HWCACHE_ALIGN); - if (ll_inode_cachep == NULL) - return -ENOMEM; - return 0; + ll_inode_cachep = kmem_cache_create("lustre_inode_cache", + sizeof(struct ll_inode_info), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (ll_inode_cachep == NULL) + return -ENOMEM; + return 0; } void ll_destroy_inodecache(void) { - int rc; - - rc = cfs_mem_cache_destroy(ll_inode_cachep); - LASSERTF(rc == 0, "ll_inode_cache: not all structures were freed\n"); + kmem_cache_destroy(ll_inode_cachep); } /* exported operations */ @@ -126,36 +123,36 @@ static int __init init_lustre_lite(void) rc = ll_init_inodecache(); if (rc) return -ENOMEM; - ll_file_data_slab = cfs_mem_cache_create("ll_file_data", - sizeof(struct ll_file_data), 0, - CFS_SLAB_HWCACHE_ALIGN); - if 
(ll_file_data_slab == NULL) { - ll_destroy_inodecache(); - return -ENOMEM; - } - - ll_remote_perm_cachep = cfs_mem_cache_create("ll_remote_perm_cache", - sizeof(struct ll_remote_perm), - 0, 0); - if (ll_remote_perm_cachep == NULL) { - cfs_mem_cache_destroy(ll_file_data_slab); - ll_file_data_slab = NULL; - ll_destroy_inodecache(); - return -ENOMEM; - } - - ll_rmtperm_hash_cachep = cfs_mem_cache_create("ll_rmtperm_hash_cache", - REMOTE_PERM_HASHSIZE * - sizeof(cfs_list_t), - 0, 0); - if (ll_rmtperm_hash_cachep == NULL) { - cfs_mem_cache_destroy(ll_remote_perm_cachep); - ll_remote_perm_cachep = NULL; - cfs_mem_cache_destroy(ll_file_data_slab); - ll_file_data_slab = NULL; - ll_destroy_inodecache(); - return -ENOMEM; - } + ll_file_data_slab = kmem_cache_create("ll_file_data", + sizeof(struct ll_file_data), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (ll_file_data_slab == NULL) { + ll_destroy_inodecache(); + return -ENOMEM; + } + + ll_remote_perm_cachep = kmem_cache_create("ll_remote_perm_cache", + sizeof(struct ll_remote_perm), + 0, 0, NULL); + if (ll_remote_perm_cachep == NULL) { + kmem_cache_destroy(ll_file_data_slab); + ll_file_data_slab = NULL; + ll_destroy_inodecache(); + return -ENOMEM; + } + + ll_rmtperm_hash_cachep = kmem_cache_create("ll_rmtperm_hash_cache", + REMOTE_PERM_HASHSIZE * + sizeof(cfs_list_t), + 0, 0, NULL); + if (ll_rmtperm_hash_cachep == NULL) { + kmem_cache_destroy(ll_remote_perm_cachep); + ll_remote_perm_cachep = NULL; + kmem_cache_destroy(ll_file_data_slab); + ll_file_data_slab = NULL; + ll_destroy_inodecache(); + return -ENOMEM; + } proc_lustre_fs_root = proc_lustre_root ? lprocfs_register("llite", proc_lustre_root, NULL, NULL) : NULL; @@ -195,8 +192,6 @@ static int __init init_lustre_lite(void) static void __exit exit_lustre_lite(void) { - int rc; - vvp_global_fini(); del_timer(&ll_capa_timer); ll_capa_thread_stop(); @@ -211,18 +206,15 @@ static void __exit exit_lustre_lite(void) ll_destroy_inodecache(); - rc = cfs_mem_cache_destroy(ll_rmtperm_hash_cachep); - LASSERTF(rc == 0, "couldn't destroy ll_rmtperm_hash_cachep\n"); - ll_rmtperm_hash_cachep = NULL; + kmem_cache_destroy(ll_rmtperm_hash_cachep); + ll_rmtperm_hash_cachep = NULL; - rc = cfs_mem_cache_destroy(ll_remote_perm_cachep); - LASSERTF(rc == 0, "couldn't destroy ll_remote_perm_cachep\n"); - ll_remote_perm_cachep = NULL; + kmem_cache_destroy(ll_remote_perm_cachep); + ll_remote_perm_cachep = NULL; - rc = cfs_mem_cache_destroy(ll_file_data_slab); - LASSERTF(rc == 0, "couldn't destroy ll_file_data slab\n"); - if (proc_lustre_fs_root) - lprocfs_remove(&proc_lustre_fs_root); + kmem_cache_destroy(ll_file_data_slab); + if (proc_lustre_fs_root) + lprocfs_remove(&proc_lustre_fs_root); } MODULE_AUTHOR("Sun Microsystems, Inc. "); diff --git a/lustre/llite/vvp_dev.c b/lustre/llite/vvp_dev.c index bdb4cc8..c0a636d 100644 --- a/lustre/llite/vvp_dev.c +++ b/lustre/llite/vvp_dev.c @@ -60,8 +60,8 @@ * "llite_" (var. "ll_") prefix. 
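The slab conversions in super25.c above all follow the kernel calling convention directly: kmem_cache_create() takes a fifth constructor argument (NULL throughout this patch), and kmem_cache_destroy() returns void, which is why the LASSERTF() checks on the old cfs_mem_cache_destroy() return code are dropped rather than converted. A condensed sketch of the resulting create/teardown pairing; demo_cache, struct demo_item, and the demo_* function names are illustrative only, modelled on the ll_init_inodecache()/ll_destroy_inodecache() change:

#include <linux/errno.h>
#include <linux/slab.h>

struct demo_item {
	unsigned long flags;
};

static struct kmem_cache *demo_cache;

static int demo_cache_init(void)
{
	/* kernel prototype: name, size, align, flags, ctor; the trailing
	 * NULL ctor is the argument the cfs_ wrapper did not expose */
	demo_cache = kmem_cache_create("demo_cache",
				       sizeof(struct demo_item), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	return demo_cache != NULL ? 0 : -ENOMEM;
}

static void demo_cache_fini(void)
{
	/* void return: nothing to assert on, matching the simplified
	 * ll_destroy_inodecache() above */
	kmem_cache_destroy(demo_cache);
}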
*/ -cfs_mem_cache_t *vvp_thread_kmem; -static cfs_mem_cache_t *vvp_session_kmem; +struct kmem_cache *vvp_thread_kmem; +static struct kmem_cache *vvp_session_kmem; static struct lu_kmem_descr vvp_caches[] = { { .ckd_cache = &vvp_thread_kmem, @@ -79,14 +79,14 @@ static struct lu_kmem_descr vvp_caches[] = { }; static void *vvp_key_init(const struct lu_context *ctx, - struct lu_context_key *key) + struct lu_context_key *key) { - struct vvp_thread_info *info; + struct vvp_thread_info *info; - OBD_SLAB_ALLOC_PTR_GFP(info, vvp_thread_kmem, CFS_ALLOC_IO); - if (info == NULL) - info = ERR_PTR(-ENOMEM); - return info; + OBD_SLAB_ALLOC_PTR_GFP(info, vvp_thread_kmem, __GFP_IO); + if (info == NULL) + info = ERR_PTR(-ENOMEM); + return info; } static void vvp_key_fini(const struct lu_context *ctx, @@ -97,14 +97,14 @@ static void vvp_key_fini(const struct lu_context *ctx, } static void *vvp_session_key_init(const struct lu_context *ctx, - struct lu_context_key *key) + struct lu_context_key *key) { - struct vvp_session *session; + struct vvp_session *session; - OBD_SLAB_ALLOC_PTR_GFP(session, vvp_session_kmem, CFS_ALLOC_IO); - if (session == NULL) - session = ERR_PTR(-ENOMEM); - return session; + OBD_SLAB_ALLOC_PTR_GFP(session, vvp_session_kmem, __GFP_IO); + if (session == NULL) + session = ERR_PTR(-ENOMEM); + return session; } static void vvp_session_key_fini(const struct lu_context *ctx, @@ -403,9 +403,9 @@ static loff_t vvp_pgcache_find(const struct lu_env *env, static void vvp_pgcache_page_show(const struct lu_env *env, struct seq_file *seq, struct cl_page *page) { - struct ccc_page *cpg; - cfs_page_t *vmpage; - int has_flags; + struct ccc_page *cpg; + struct page *vmpage; + int has_flags; cpg = cl2ccc_page(cl_page_at(page, &vvp_device_type)); vmpage = cpg->cpg_page; diff --git a/lustre/llite/vvp_internal.h b/lustre/llite/vvp_internal.h index bff39b4..3abf4a7 100644 --- a/lustre/llite/vvp_internal.h +++ b/lustre/llite/vvp_internal.h @@ -54,14 +54,14 @@ int vvp_lock_init (const struct lu_env *env, struct cl_object *obj, struct cl_lock *lock, const struct cl_io *io); int vvp_page_init (const struct lu_env *env, - struct cl_object *obj, - struct cl_page *page, cfs_page_t *vmpage); + struct cl_object *obj, + struct cl_page *page, struct page *vmpage); struct lu_object *vvp_object_alloc(const struct lu_env *env, const struct lu_object_header *hdr, struct lu_device *dev); struct ccc_object *cl_inode2ccc(struct inode *inode); -extern cfs_mem_cache_t *vvp_thread_kmem; +extern struct kmem_cache *vvp_thread_kmem; #endif /* VVP_INTERNAL_H */ diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c index 7aded2e..0908703 100644 --- a/lustre/llite/vvp_io.c +++ b/lustre/llite/vvp_io.c @@ -516,10 +516,7 @@ static int vvp_io_read_start(const struct lu_env *env, if (!vio->cui_ra_window_set) { vio->cui_ra_window_set = 1; bead->lrr_start = cl_index(obj, pos); - /* - * XXX: explicit CFS_PAGE_SIZE - */ - bead->lrr_count = cl_index(obj, tot + CFS_PAGE_SIZE - 1); + bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1); ll_ra_read_in(file, bead); } @@ -626,7 +623,7 @@ static int vvp_io_write_start(const struct lu_env *env, #ifndef HAVE_VM_OP_FAULT static int vvp_io_kernel_fault(struct vvp_fault_io *cfio) { - cfs_page_t *vmpage; + struct page *vmpage; vmpage = filemap_nopage(cfio->ft_vma, cfio->nopage.ft_address, cfio->nopage.ft_type); @@ -698,7 +695,7 @@ static int vvp_io_fault_start(const struct lu_env *env, struct vvp_fault_io *cfio = &vio->u.fault; loff_t offset; int result = 0; - cfs_page_t *vmpage = NULL; + struct 
page *vmpage = NULL; struct cl_page *page; loff_t size; pgoff_t last; /* last page in a file data region */ @@ -865,7 +862,7 @@ static int vvp_io_read_page(const struct lu_env *env, struct ll_sb_info *sbi = ll_i2sbi(inode); struct ll_file_data *fd = cl2ccc_io(env, ios)->cui_fd; struct ll_readahead_state *ras = &fd->fd_ras; - cfs_page_t *vmpage = cp->cpg_page; + struct page *vmpage = cp->cpg_page; struct cl_2queue *queue = &io->ci_queue; int rc; @@ -984,7 +981,7 @@ static int vvp_io_prepare_write(const struct lu_env *env, struct cl_object *obj = slice->cpl_obj; struct ccc_page *cp = cl2ccc_page(slice); struct cl_page *pg = slice->cpl_page; - cfs_page_t *vmpage = cp->cpg_page; + struct page *vmpage = cp->cpg_page; int result; @@ -1001,7 +998,7 @@ static int vvp_io_prepare_write(const struct lu_env *env, * We're completely overwriting an existing page, so _don't_ * set it up to date until commit_write */ - if (from == 0 && to == CFS_PAGE_SIZE) { + if (from == 0 && to == PAGE_CACHE_SIZE) { CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n"); POISON_PAGE(page, 0x11); } else @@ -1024,7 +1021,7 @@ static int vvp_io_commit_write(const struct lu_env *env, struct inode *inode = ccc_object_inode(obj); struct ll_sb_info *sbi = ll_i2sbi(inode); struct ll_inode_info *lli = ll_i2info(inode); - cfs_page_t *vmpage = cp->cpg_page; + struct page *vmpage = cp->cpg_page; int result; int tallyop; @@ -1066,7 +1063,7 @@ static int vvp_io_commit_write(const struct lu_env *env, set_page_dirty(vmpage); vvp_write_pending(cl2ccc(obj), cp); } else if (result == -EDQUOT) { - pgoff_t last_index = i_size_read(inode) >> CFS_PAGE_SHIFT; + pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT; bool need_clip = true; /* @@ -1084,7 +1081,7 @@ static int vvp_io_commit_write(const struct lu_env *env, * being. */ if (last_index > pg->cp_index) { - to = CFS_PAGE_SIZE; + to = PAGE_CACHE_SIZE; need_clip = false; } else if (last_index == pg->cp_index) { int size_to = i_size_read(inode) & ~CFS_PAGE_MASK; diff --git a/lustre/llite/vvp_page.c b/lustre/llite/vvp_page.c index 4b85a23..1323b36 100644 --- a/lustre/llite/vvp_page.c +++ b/lustre/llite/vvp_page.c @@ -58,32 +58,32 @@ static void vvp_page_fini_common(struct ccc_page *cp) { - cfs_page_t *vmpage = cp->cpg_page; + struct page *vmpage = cp->cpg_page; - LASSERT(vmpage != NULL); - page_cache_release(vmpage); + LASSERT(vmpage != NULL); + page_cache_release(vmpage); } static void vvp_page_fini(const struct lu_env *env, - struct cl_page_slice *slice) + struct cl_page_slice *slice) { - struct ccc_page *cp = cl2ccc_page(slice); - cfs_page_t *vmpage = cp->cpg_page; + struct ccc_page *cp = cl2ccc_page(slice); + struct page *vmpage = cp->cpg_page; - /* - * vmpage->private was already cleared when page was moved into - * VPG_FREEING state. - */ - LASSERT((struct cl_page *)vmpage->private != slice->cpl_page); - vvp_page_fini_common(cp); + /* + * vmpage->private was already cleared when page was moved into + * VPG_FREEING state. 
+ */ + LASSERT((struct cl_page *)vmpage->private != slice->cpl_page); + vvp_page_fini_common(cp); } static int vvp_page_own(const struct lu_env *env, const struct cl_page_slice *slice, struct cl_io *io, int nonblock) { - struct ccc_page *vpg = cl2ccc_page(slice); - cfs_page_t *vmpage = vpg->cpg_page; + struct ccc_page *vpg = cl2ccc_page(slice); + struct page *vmpage = vpg->cpg_page; LASSERT(vmpage != NULL); if (nonblock) { @@ -104,44 +104,44 @@ static int vvp_page_own(const struct lu_env *env, } static void vvp_page_assume(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *unused) + const struct cl_page_slice *slice, + struct cl_io *unused) { - cfs_page_t *vmpage = cl2vm_page(slice); + struct page *vmpage = cl2vm_page(slice); - LASSERT(vmpage != NULL); - LASSERT(PageLocked(vmpage)); - wait_on_page_writeback(vmpage); + LASSERT(vmpage != NULL); + LASSERT(PageLocked(vmpage)); + wait_on_page_writeback(vmpage); } static void vvp_page_unassume(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *unused) + const struct cl_page_slice *slice, + struct cl_io *unused) { - cfs_page_t *vmpage = cl2vm_page(slice); + struct page *vmpage = cl2vm_page(slice); - LASSERT(vmpage != NULL); - LASSERT(PageLocked(vmpage)); + LASSERT(vmpage != NULL); + LASSERT(PageLocked(vmpage)); } static void vvp_page_disown(const struct lu_env *env, - const struct cl_page_slice *slice, struct cl_io *io) + const struct cl_page_slice *slice, struct cl_io *io) { - cfs_page_t *vmpage = cl2vm_page(slice); + struct page *vmpage = cl2vm_page(slice); - LASSERT(vmpage != NULL); - LASSERT(PageLocked(vmpage)); + LASSERT(vmpage != NULL); + LASSERT(PageLocked(vmpage)); - unlock_page(cl2vm_page(slice)); + unlock_page(cl2vm_page(slice)); } static void vvp_page_discard(const struct lu_env *env, const struct cl_page_slice *slice, struct cl_io *unused) { - cfs_page_t *vmpage = cl2vm_page(slice); + struct page *vmpage = cl2vm_page(slice); struct address_space *mapping; - struct ccc_page *cpg = cl2ccc_page(slice); + struct ccc_page *cpg = cl2ccc_page(slice); LASSERT(vmpage != NULL); LASSERT(PageLocked(vmpage)); @@ -159,30 +159,30 @@ static void vvp_page_discard(const struct lu_env *env, } static int vvp_page_unmap(const struct lu_env *env, - const struct cl_page_slice *slice, - struct cl_io *unused) + const struct cl_page_slice *slice, + struct cl_io *unused) { - cfs_page_t *vmpage = cl2vm_page(slice); + struct page *vmpage = cl2vm_page(slice); __u64 offset; - LASSERT(vmpage != NULL); - LASSERT(PageLocked(vmpage)); + LASSERT(vmpage != NULL); + LASSERT(PageLocked(vmpage)); - offset = vmpage->index << CFS_PAGE_SHIFT; + offset = vmpage->index << PAGE_CACHE_SHIFT; - /* - * XXX is it safe to call this with the page lock held? - */ - ll_teardown_mmaps(vmpage->mapping, offset, offset + CFS_PAGE_SIZE); - return 0; + /* + * XXX is it safe to call this with the page lock held? 
+ */ + ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE); + return 0; } static void vvp_page_delete(const struct lu_env *env, const struct cl_page_slice *slice) { - cfs_page_t *vmpage = cl2vm_page(slice); - struct inode *inode = vmpage->mapping->host; - struct cl_object *obj = slice->cpl_obj; + struct page *vmpage = cl2vm_page(slice); + struct inode *inode = vmpage->mapping->host; + struct cl_object *obj = slice->cpl_obj; LASSERT(PageLocked(vmpage)); LASSERT((struct cl_page *)vmpage->private == slice->cpl_page); @@ -198,17 +198,17 @@ static void vvp_page_delete(const struct lu_env *env, } static void vvp_page_export(const struct lu_env *env, - const struct cl_page_slice *slice, - int uptodate) + const struct cl_page_slice *slice, + int uptodate) { - cfs_page_t *vmpage = cl2vm_page(slice); + struct page *vmpage = cl2vm_page(slice); - LASSERT(vmpage != NULL); - LASSERT(PageLocked(vmpage)); - if (uptodate) - SetPageUptodate(vmpage); - else - ClearPageUptodate(vmpage); + LASSERT(vmpage != NULL); + LASSERT(PageLocked(vmpage)); + if (uptodate) + SetPageUptodate(vmpage); + else + ClearPageUptodate(vmpage); } static int vvp_page_is_vmlocked(const struct lu_env *env, @@ -230,7 +230,7 @@ static int vvp_page_prep_write(const struct lu_env *env, const struct cl_page_slice *slice, struct cl_io *unused) { - cfs_page_t *vmpage = cl2vm_page(slice); + struct page *vmpage = cl2vm_page(slice); LASSERT(PageLocked(vmpage)); LASSERT(!PageDirty(vmpage)); @@ -247,7 +247,7 @@ static int vvp_page_prep_write(const struct lu_env *env, * This takes inode as a separate argument, because inode on which error is to * be set can be different from \a vmpage inode in case of direct-io. */ -static void vvp_vmpage_error(struct inode *inode, cfs_page_t *vmpage, int ioret) +static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret) { struct ccc_object *obj = cl_inode2ccc(inode); @@ -274,7 +274,7 @@ static void vvp_page_completion_read(const struct lu_env *env, int ioret) { struct ccc_page *cp = cl2ccc_page(slice); - cfs_page_t *vmpage = cp->cpg_page; + struct page *vmpage = cp->cpg_page; struct cl_page *page = cl_page_top(slice->cpl_page); struct inode *inode = ccc_object_inode(page->cp_obj); ENTRY; @@ -301,10 +301,10 @@ static void vvp_page_completion_write(const struct lu_env *env, const struct cl_page_slice *slice, int ioret) { - struct ccc_page *cp = cl2ccc_page(slice); - struct cl_page *pg = slice->cpl_page; - cfs_page_t *vmpage = cp->cpg_page; - ENTRY; + struct ccc_page *cp = cl2ccc_page(slice); + struct cl_page *pg = slice->cpl_page; + struct page *vmpage = cp->cpg_page; + ENTRY; LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage))); LASSERT(PageWriteback(vmpage)); @@ -352,7 +352,7 @@ static void vvp_page_completion_write(const struct lu_env *env, static int vvp_page_make_ready(const struct lu_env *env, const struct cl_page_slice *slice) { - cfs_page_t *vmpage = cl2vm_page(slice); + struct page *vmpage = cl2vm_page(slice); struct cl_page *pg = slice->cpl_page; int result = 0; @@ -382,8 +382,8 @@ static int vvp_page_print(const struct lu_env *env, const struct cl_page_slice *slice, void *cookie, lu_printer_t printer) { - struct ccc_page *vp = cl2ccc_page(slice); - cfs_page_t *vmpage = vp->cpg_page; + struct ccc_page *vp = cl2ccc_page(slice); + struct page *vmpage = vp->cpg_page; (*printer)(env, cookie, LUSTRE_VVP_NAME"-page@%p(%d:%d:%d) " "vm@%p ", @@ -534,7 +534,7 @@ static const struct cl_page_operations vvp_transient_page_ops = { }; int vvp_page_init(const struct lu_env 
*env, struct cl_object *obj, - struct cl_page *page, cfs_page_t *vmpage) + struct cl_page *page, struct page *vmpage) { struct ccc_page *cpg = cl_object_page_slice(obj, page); diff --git a/lustre/lmv/lmv_obd.c b/lustre/lmv/lmv_obd.c index d226bbc..0868280 100644 --- a/lustre/lmv/lmv_obd.c +++ b/lustre/lmv/lmv_obd.c @@ -846,7 +846,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, RETURN(-EINVAL); /* copy UUID */ - if (cfs_copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd), + if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd), min((int) data->ioc_plen2, (int) sizeof(struct obd_uuid)))) RETURN(-EFAULT); @@ -856,7 +856,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, 0); if (rc) RETURN(rc); - if (cfs_copy_to_user(data->ioc_pbuf1, &stat_buf, + if (copy_to_user(data->ioc_pbuf1, &stat_buf, min((int) data->ioc_plen1, (int) sizeof(stat_buf)))) RETURN(-EFAULT); @@ -1944,7 +1944,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid, * |s|e|f|p|ent| 0 | ... | 0 | * '----------------- -----' * - * However, on hosts where the native VM page size (CFS_PAGE_SIZE) is + * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is * larger than LU_PAGE_SIZE, a single host page may contain multiple * lu_dirpages. After reading the lu_dirpages from the MDS, the * ldp_hash_end of the first lu_dirpage refers to the one immediately @@ -1975,13 +1975,13 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid, * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span * to the first entry of the next lu_dirpage. */ -#if CFS_PAGE_SIZE > LU_PAGE_SIZE +#if PAGE_CACHE_SIZE > LU_PAGE_SIZE static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs) { int i; for (i = 0; i < ncfspgs; i++) { - struct lu_dirpage *dp = cfs_kmap(pages[i]); + struct lu_dirpage *dp = kmap(pages[i]); struct lu_dirpage *first = dp; struct lu_dirent *end_dirent = NULL; struct lu_dirent *ent; @@ -2020,12 +2020,12 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs) first->ldp_flags &= ~cpu_to_le32(LDF_COLLIDE); first->ldp_flags |= flags & cpu_to_le32(LDF_COLLIDE); - cfs_kunmap(pages[i]); + kunmap(pages[i]); } } #else #define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0) -#endif /* CFS_PAGE_SIZE > LU_PAGE_SIZE */ +#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data, struct page **pages, struct ptlrpc_request **request) @@ -2034,7 +2034,7 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data, struct lmv_obd *lmv = &obd->u.lmv; __u64 offset = op_data->op_offset; int rc; - int ncfspgs; /* pages read in CFS_PAGE_SIZE */ + int ncfspgs; /* pages read in PAGE_CACHE_SIZE */ int nlupgs; /* pages read in LU_PAGE_SIZE */ struct lmv_tgt_desc *tgt; ENTRY; @@ -2054,8 +2054,8 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data, if (rc != 0) RETURN(rc); - ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + CFS_PAGE_SIZE - 1) - >> CFS_PAGE_SHIFT; + ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT; LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK)); LASSERT(ncfspgs > 0 && ncfspgs <= op_data->op_npages); diff --git a/lustre/lmv/lproc_lmv.c b/lustre/lmv/lproc_lmv.c index cd436a1..6a3b128 100644 --- a/lustre/lmv/lproc_lmv.c +++ b/lustre/lmv/lproc_lmv.c @@ 
-107,7 +107,7 @@ static int lmv_wr_placement(struct file *file, const char *buffer, placement_policy_t policy; struct lmv_obd *lmv; - if (cfs_copy_from_user(dummy, buffer, MAX_POLICY_STRING_SIZE)) + if (copy_from_user(dummy, buffer, MAX_POLICY_STRING_SIZE)) return -EFAULT; LASSERT(dev != NULL); diff --git a/lustre/lod/lod_dev.c b/lustre/lod/lod_dev.c index 99791c5..5a0bdb5 100644 --- a/lustre/lod/lod_dev.c +++ b/lustre/lod/lod_dev.c @@ -98,7 +98,7 @@ extern struct lu_object_operations lod_lu_robj_ops; extern struct dt_object_operations lod_obj_ops; /* Slab for OSD object allocation */ -cfs_mem_cache_t *lod_object_kmem; +struct kmem_cache *lod_object_kmem; static struct lu_kmem_descr lod_caches[] = { { @@ -125,7 +125,7 @@ struct lu_object *lod_object_alloc(const struct lu_env *env, int rc = 0; ENTRY; - OBD_SLAB_ALLOC_PTR_GFP(lod_obj, lod_object_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(lod_obj, lod_object_kmem, __GFP_IO); if (lod_obj == NULL) RETURN(ERR_PTR(-ENOMEM)); diff --git a/lustre/lod/lod_object.c b/lustre/lod/lod_object.c index 9eeda28..5ae976a 100644 --- a/lustre/lod/lod_object.c +++ b/lustre/lod/lod_object.c @@ -49,7 +49,7 @@ #include "lod_internal.h" -extern cfs_mem_cache_t *lod_object_kmem; +extern struct kmem_cache *lod_object_kmem; static const struct dt_body_operations lod_body_lnk_ops; static int lod_index_lookup(const struct lu_env *env, struct dt_object *dt, diff --git a/lustre/lov/lov_cl_internal.h b/lustre/lov/lov_cl_internal.h index 61ee850..c7056c8 100644 --- a/lustre/lov/lov_cl_internal.h +++ b/lustre/lov/lov_cl_internal.h @@ -556,17 +556,17 @@ extern struct lu_device_type lovsub_device_type; extern struct lu_context_key lov_key; extern struct lu_context_key lov_session_key; -extern cfs_mem_cache_t *lov_lock_kmem; -extern cfs_mem_cache_t *lov_object_kmem; -extern cfs_mem_cache_t *lov_thread_kmem; -extern cfs_mem_cache_t *lov_session_kmem; -extern cfs_mem_cache_t *lov_req_kmem; +extern struct kmem_cache *lov_lock_kmem; +extern struct kmem_cache *lov_object_kmem; +extern struct kmem_cache *lov_thread_kmem; +extern struct kmem_cache *lov_session_kmem; +extern struct kmem_cache *lov_req_kmem; -extern cfs_mem_cache_t *lovsub_lock_kmem; -extern cfs_mem_cache_t *lovsub_object_kmem; -extern cfs_mem_cache_t *lovsub_req_kmem; +extern struct kmem_cache *lovsub_lock_kmem; +extern struct kmem_cache *lovsub_object_kmem; +extern struct kmem_cache *lovsub_req_kmem; -extern cfs_mem_cache_t *lov_lock_link_kmem; +extern struct kmem_cache *lov_lock_link_kmem; int lov_object_init (const struct lu_env *env, struct lu_object *obj, const struct lu_object_conf *conf); @@ -601,16 +601,16 @@ int lov_sublock_modify (const struct lu_env *env, struct lov_lock *lov, int lov_page_init (const struct lu_env *env, struct cl_object *ob, - struct cl_page *page, cfs_page_t *vmpage); + struct cl_page *page, struct page *vmpage); int lovsub_page_init (const struct lu_env *env, struct cl_object *ob, - struct cl_page *page, cfs_page_t *vmpage); + struct cl_page *page, struct page *vmpage); int lov_page_init_empty (const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, cfs_page_t *vmpage); + struct cl_page *page, struct page *vmpage); int lov_page_init_raid0 (const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, cfs_page_t *vmpage); + struct cl_page *page, struct page *vmpage); struct lu_object *lov_object_alloc (const struct lu_env *env, const struct lu_object_header *hdr, struct lu_device *dev); diff --git a/lustre/lov/lov_dev.c b/lustre/lov/lov_dev.c index 
031253a..600522b 100644 --- a/lustre/lov/lov_dev.c +++ b/lustre/lov/lov_dev.c @@ -45,17 +45,17 @@ #include "lov_cl_internal.h" -cfs_mem_cache_t *lov_lock_kmem; -cfs_mem_cache_t *lov_object_kmem; -cfs_mem_cache_t *lov_thread_kmem; -cfs_mem_cache_t *lov_session_kmem; -cfs_mem_cache_t *lov_req_kmem; +struct kmem_cache *lov_lock_kmem; +struct kmem_cache *lov_object_kmem; +struct kmem_cache *lov_thread_kmem; +struct kmem_cache *lov_session_kmem; +struct kmem_cache *lov_req_kmem; -cfs_mem_cache_t *lovsub_lock_kmem; -cfs_mem_cache_t *lovsub_object_kmem; -cfs_mem_cache_t *lovsub_req_kmem; +struct kmem_cache *lovsub_lock_kmem; +struct kmem_cache *lovsub_object_kmem; +struct kmem_cache *lovsub_req_kmem; -cfs_mem_cache_t *lov_lock_link_kmem; +struct kmem_cache *lov_lock_link_kmem; /** Lock class of lov_device::ld_mutex. */ struct lock_class_key cl_lov_device_mutex_class; @@ -143,7 +143,7 @@ static void *lov_key_init(const struct lu_context *ctx, { struct lov_thread_info *info; - OBD_SLAB_ALLOC_PTR_GFP(info, lov_thread_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(info, lov_thread_kmem, __GFP_IO); if (info != NULL) CFS_INIT_LIST_HEAD(&info->lti_closure.clc_list); else @@ -170,7 +170,7 @@ static void *lov_session_key_init(const struct lu_context *ctx, { struct lov_session *info; - OBD_SLAB_ALLOC_PTR_GFP(info, lov_session_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(info, lov_session_kmem, __GFP_IO); if (info == NULL) info = ERR_PTR(-ENOMEM); return info; @@ -261,7 +261,7 @@ static int lov_req_init(const struct lu_env *env, struct cl_device *dev, int result; ENTRY; - OBD_SLAB_ALLOC_PTR_GFP(lr, lov_req_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(lr, lov_req_kmem, __GFP_IO); if (lr != NULL) { cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops); result = 0; diff --git a/lustre/lov/lov_ea.c b/lustre/lov/lov_ea.c index bf6d9eb..6ab28e9 100644 --- a/lustre/lov/lov_ea.c +++ b/lustre/lov/lov_ea.c @@ -105,7 +105,7 @@ struct lov_stripe_md *lsm_alloc_plain(__u16 stripe_count, int *size) return NULL;; for (i = 0; i < stripe_count; i++) { - OBD_SLAB_ALLOC_PTR_GFP(loi, lov_oinfo_slab, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(loi, lov_oinfo_slab, __GFP_IO); if (loi == NULL) goto err; lsm->lsm_oinfo[i] = loi; diff --git a/lustre/lov/lov_internal.h b/lustre/lov/lov_internal.h index d9ac87c..767ca73 100644 --- a/lustre/lov/lov_internal.h +++ b/lustre/lov/lov_internal.h @@ -87,7 +87,7 @@ struct lov_request_set { spinlock_t set_lock; }; -extern cfs_mem_cache_t *lov_oinfo_slab; +extern struct kmem_cache *lov_oinfo_slab; void lov_finish_set(struct lov_request_set *set); diff --git a/lustre/lov/lov_lock.c b/lustre/lov/lov_lock.c index 417d025..794354d 100644 --- a/lustre/lov/lov_lock.c +++ b/lustre/lov/lov_lock.c @@ -147,7 +147,7 @@ static struct cl_lock *lov_sublock_alloc(const struct lu_env *env, LASSERT(idx < lck->lls_nr); ENTRY; - OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, __GFP_IO); if (link != NULL) { struct lov_sublock_env *subenv; struct lov_lock_sub *lls; @@ -1193,7 +1193,7 @@ int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj, int result; ENTRY; - OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, __GFP_IO); if (lck != NULL) { cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops); result = lov_lock_sub_init(env, lck, io); @@ -1229,7 +1229,7 @@ int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj, int result = -ENOMEM; ENTRY; - 
OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, __GFP_IO); if (lck != NULL) { cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops); lck->lls_orig = lock->cll_descr; diff --git a/lustre/lov/lov_obd.c b/lustre/lov/lov_obd.c index 5843bca..86cdda3 100644 --- a/lustre/lov/lov_obd.c +++ b/lustre/lov/lov_obd.c @@ -1958,9 +1958,9 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, RETURN(-EINVAL); /* copy UUID */ - if (cfs_copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd), - min((int) data->ioc_plen2, - (int) sizeof(struct obd_uuid)))) + if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd), + min((int)data->ioc_plen2, + (int)sizeof(struct obd_uuid)))) RETURN(-EFAULT); flags = uarg ? *(__u32*)uarg : 0; @@ -1970,7 +1970,7 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, flags); if (rc) RETURN(rc); - if (cfs_copy_to_user(data->ioc_pbuf1, &stat_buf, + if (copy_to_user(data->ioc_pbuf1, &stat_buf, min((int) data->ioc_plen1, (int) sizeof(stat_buf)))) RETURN(-EFAULT); @@ -2016,7 +2016,7 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len, *genp = lov->lov_tgts[i]->ltd_gen; } - if (cfs_copy_to_user((void *)uarg, buf, len)) + if (copy_to_user((void *)uarg, buf, len)) rc = -EFAULT; obd_ioctl_freedata(buf, len); break; @@ -2879,14 +2879,14 @@ struct obd_ops lov_obd_ops = { .o_quotacheck = lov_quotacheck, }; -cfs_mem_cache_t *lov_oinfo_slab; +struct kmem_cache *lov_oinfo_slab; extern struct lu_kmem_descr lov_caches[]; int __init lov_init(void) { struct lprocfs_static_vars lvars = { 0 }; - int rc, rc2; + int rc; ENTRY; /* print an address of _any_ initialized kernel symbol from this @@ -2898,9 +2898,9 @@ int __init lov_init(void) if (rc) return rc; - lov_oinfo_slab = cfs_mem_cache_create("lov_oinfo", - sizeof(struct lov_oinfo), - 0, CFS_SLAB_HWCACHE_ALIGN); + lov_oinfo_slab = kmem_cache_create("lov_oinfo", + sizeof(struct lov_oinfo), 0, + SLAB_HWCACHE_ALIGN, NULL); if (lov_oinfo_slab == NULL) { lu_kmem_fini(lov_caches); return -ENOMEM; @@ -2911,8 +2911,7 @@ int __init lov_init(void) LUSTRE_LOV_NAME, &lov_device_type); if (rc) { - rc2 = cfs_mem_cache_destroy(lov_oinfo_slab); - LASSERT(rc2 == 0); + kmem_cache_destroy(lov_oinfo_slab); lu_kmem_fini(lov_caches); } @@ -2922,12 +2921,8 @@ int __init lov_init(void) #ifdef __KERNEL__ static void /*__exit*/ lov_exit(void) { - int rc; - - class_unregister_type(LUSTRE_LOV_NAME); - rc = cfs_mem_cache_destroy(lov_oinfo_slab); - LASSERT(rc == 0); - + class_unregister_type(LUSTRE_LOV_NAME); + kmem_cache_destroy(lov_oinfo_slab); lu_kmem_fini(lov_caches); } diff --git a/lustre/lov/lov_object.c b/lustre/lov/lov_object.c index e1f5b5d..65dd11d 100644 --- a/lustre/lov/lov_object.c +++ b/lustre/lov/lov_object.c @@ -68,7 +68,7 @@ struct lov_layout_operations { int (*llo_print)(const struct lu_env *env, void *cookie, lu_printer_t p, const struct lu_object *o); int (*llo_page_init)(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, cfs_page_t *vmpage); + struct cl_page *page, struct page *vmpage); int (*llo_lock_init)(const struct lu_env *env, struct cl_object *obj, struct cl_lock *lock, const struct cl_io *io); @@ -814,7 +814,7 @@ static int lov_object_print(const struct lu_env *env, void *cookie, } int lov_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, cfs_page_t *vmpage) + struct cl_page *page, struct page *vmpage) { return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, 
env, obj, page, vmpage); @@ -888,7 +888,7 @@ struct lu_object *lov_object_alloc(const struct lu_env *env, struct lu_object *obj; ENTRY; - OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, __GFP_IO); if (lov != NULL) { obj = lov2lu(lov); lu_object_init(obj, NULL, dev); diff --git a/lustre/lov/lov_pack.c b/lustre/lov/lov_pack.c index 829c6ac..781c6b4 100644 --- a/lustre/lov/lov_pack.c +++ b/lustre/lov/lov_pack.c @@ -610,7 +610,7 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm, /* we only need the header part from user space to get lmm_magic and * lmm_stripe_count, (the header part is common to v1 and v3) */ lum_size = sizeof(struct lov_user_md_v1); - if (cfs_copy_from_user(&lum, lump, lum_size)) + if (copy_from_user(&lum, lump, lum_size)) GOTO(out_set, rc = -EFAULT); else if ((lum.lmm_magic != LOV_USER_MAGIC) && (lum.lmm_magic != LOV_USER_MAGIC_V3)) @@ -620,7 +620,7 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm, (lum.lmm_stripe_count < lsm->lsm_stripe_count)) { /* Return right size of stripe to user */ lum.lmm_stripe_count = lsm->lsm_stripe_count; - rc = cfs_copy_to_user(lump, &lum, lum_size); + rc = copy_to_user(lump, &lum, lum_size); GOTO(out_set, rc = -EOVERFLOW); } rc = lov_packmd(exp, &lmmk, lsm); @@ -670,7 +670,7 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm, lum.lmm_layout_gen = lmmk->lmm_layout_gen; ((struct lov_user_md *)lmmk)->lmm_layout_gen = lum.lmm_layout_gen; ((struct lov_user_md *)lmmk)->lmm_stripe_count = lum.lmm_stripe_count; - if (cfs_copy_to_user(lump, lmmk, lmm_size)) + if (copy_to_user(lump, lmmk, lmm_size)) rc = -EFAULT; obd_free_diskmd(exp, &lmmk); diff --git a/lustre/lov/lov_page.c b/lustre/lov/lov_page.c index 1af2c62..c3c9915 100644 --- a/lustre/lov/lov_page.c +++ b/lustre/lov/lov_page.c @@ -159,7 +159,7 @@ static void lov_empty_page_fini(const struct lu_env *env, } int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, cfs_page_t *vmpage) + struct cl_page *page, struct page *vmpage) { struct lov_object *loo = cl2lov(obj); struct lov_layout_raid0 *r0 = lov_r0(loo); @@ -217,16 +217,16 @@ static const struct cl_page_operations lov_empty_page_ops = { }; int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, cfs_page_t *vmpage) + struct cl_page *page, struct page *vmpage) { struct lov_page *lpg = cl_object_page_slice(obj, page); void *addr; ENTRY; cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_empty_page_ops); - addr = cfs_kmap(vmpage); + addr = kmap(vmpage); memset(addr, 0, cl_page_size(obj)); - cfs_kunmap(vmpage); + kunmap(vmpage); cl_page_export(env, page, 1); RETURN(0); } diff --git a/lustre/lov/lovsub_dev.c b/lustre/lov/lovsub_dev.c index 22f8e10..8f58f4c 100644 --- a/lustre/lov/lovsub_dev.c +++ b/lustre/lov/lovsub_dev.c @@ -148,7 +148,7 @@ static int lovsub_req_init(const struct lu_env *env, struct cl_device *dev, struct lovsub_req *lsr; int result; - OBD_SLAB_ALLOC_PTR_GFP(lsr, lovsub_req_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(lsr, lovsub_req_kmem, __GFP_IO); if (lsr != NULL) { cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops); result = 0; diff --git a/lustre/lov/lovsub_lock.c b/lustre/lov/lovsub_lock.c index a76acf2..3f0593c 100644 --- a/lustre/lov/lovsub_lock.c +++ b/lustre/lov/lovsub_lock.c @@ -472,7 +472,7 @@ int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj, int result; ENTRY; - OBD_SLAB_ALLOC_PTR_GFP(lsk, 
lovsub_lock_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(lsk, lovsub_lock_kmem, __GFP_IO); if (lsk != NULL) { CFS_INIT_LIST_HEAD(&lsk->lss_parents); cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops); diff --git a/lustre/lov/lovsub_object.c b/lustre/lov/lovsub_object.c index b8f1b33..66130ea 100644 --- a/lustre/lov/lovsub_object.c +++ b/lustre/lov/lovsub_object.c @@ -151,7 +151,7 @@ struct lu_object *lovsub_object_alloc(const struct lu_env *env, struct lu_object *obj; ENTRY; - OBD_SLAB_ALLOC_PTR_GFP(los, lovsub_object_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(los, lovsub_object_kmem, __GFP_IO); if (los != NULL) { struct cl_object_header *hdr; diff --git a/lustre/lov/lovsub_page.c b/lustre/lov/lovsub_page.c index 8cbe7495..d14ce67 100644 --- a/lustre/lov/lovsub_page.c +++ b/lustre/lov/lovsub_page.c @@ -62,7 +62,7 @@ static const struct cl_page_operations lovsub_page_ops = { }; int lovsub_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, cfs_page_t *unused) + struct cl_page *page, struct page *unused) { struct lovsub_page *lsb = cl_object_page_slice(obj, page); ENTRY; diff --git a/lustre/lvfs/fsfilt_ext3.c b/lustre/lvfs/fsfilt_ext3.c index 11951aa..ae23b27 100644 --- a/lustre/lvfs/fsfilt_ext3.c +++ b/lustre/lvfs/fsfilt_ext3.c @@ -76,7 +76,7 @@ #define fsfilt_log_start_commit(journal, tid) jbd2_log_start_commit(journal, tid) #define fsfilt_log_wait_commit(journal, tid) jbd2_log_wait_commit(journal, tid) -static cfs_mem_cache_t *fcb_cache; +static struct kmem_cache *fcb_cache; struct fsfilt_cb_data { struct ext4_journal_cb_entry cb_jcb; /* private data - MUST BE FIRST */ @@ -470,7 +470,7 @@ int fsfilt_ext3_map_ext_inode_pages(struct inode *inode, struct page **page, int pages, unsigned long *blocks, int create) { - int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits; + int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits; int rc = 0, i = 0; struct page *fp = NULL; int clen = 0; @@ -519,7 +519,7 @@ int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page, int pages, unsigned long *blocks, int create) { - int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits; + int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits; unsigned long *b; int rc = 0, i; @@ -739,32 +739,28 @@ static struct fsfilt_operations fsfilt_ext3_ops = { static int __init fsfilt_ext3_init(void) { - int rc; - - fcb_cache = cfs_mem_cache_create("fsfilt_ext3_fcb", - sizeof(struct fsfilt_cb_data), 0, 0); - if (!fcb_cache) { - CERROR("error allocating fsfilt journal callback cache\n"); - GOTO(out, rc = -ENOMEM); - } + int rc; + + fcb_cache = kmem_cache_create("fsfilt_ext3_fcb", + sizeof(struct fsfilt_cb_data), + 0, 0, NULL); + if (!fcb_cache) { + CERROR("error allocating fsfilt journal callback cache\n"); + GOTO(out, rc = -ENOMEM); + } - rc = fsfilt_register_ops(&fsfilt_ext3_ops); + rc = fsfilt_register_ops(&fsfilt_ext3_ops); - if (rc) { - int err = cfs_mem_cache_destroy(fcb_cache); - LASSERTF(err == 0, "error destroying new cache: rc %d\n", err); - } + if (rc) + kmem_cache_destroy(fcb_cache); out: - return rc; + return rc; } static void __exit fsfilt_ext3_exit(void) { - int rc; - - fsfilt_unregister_ops(&fsfilt_ext3_ops); - rc = cfs_mem_cache_destroy(fcb_cache); - LASSERTF(rc == 0, "couldn't destroy fcb_cache slab\n"); + fsfilt_unregister_ops(&fsfilt_ext3_ops); + kmem_cache_destroy(fcb_cache); } module_init(fsfilt_ext3_init); diff --git a/lustre/mdc/mdc_request.c b/lustre/mdc/mdc_request.c index 5c8eab8..2d2167c 100644 --- a/lustre/mdc/mdc_request.c +++ 
b/lustre/mdc/mdc_request.c @@ -1076,10 +1076,10 @@ restart_bulk: /* NB req now owns desc and will free it when it gets freed */ for (i = 0; i < op_data->op_npages; i++) - ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, CFS_PAGE_SIZE); + ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE); mdc_readdir_pack(req, op_data->op_offset, - CFS_PAGE_SIZE * op_data->op_npages, + PAGE_CACHE_SIZE * op_data->op_npages, &op_data->op_fid1, op_data->op_capa1); ptlrpc_request_set_replen(req); @@ -1110,7 +1110,7 @@ restart_bulk: if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) { CERROR("Unexpected # bytes transferred: %d (%ld expected)\n", req->rq_bulk->bd_nob_transferred, - CFS_PAGE_SIZE * op_data->op_npages); + PAGE_CACHE_SIZE * op_data->op_npages); ptlrpc_req_finished(req); RETURN(-EPROTO); } @@ -1883,18 +1883,18 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, GOTO(out, rc = -ENODEV); /* copy UUID */ - if (cfs_copy_to_user(data->ioc_pbuf2, obd2cli_tgt(obd), - min((int) data->ioc_plen2, - (int) sizeof(struct obd_uuid)))) - GOTO(out, rc = -EFAULT); + if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(obd), + min((int)data->ioc_plen2, + (int)sizeof(struct obd_uuid)))) + GOTO(out, rc = -EFAULT); - rc = mdc_statfs(NULL, obd->obd_self_export, &stat_buf, - cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS), - 0); - if (rc != 0) - GOTO(out, rc); + rc = mdc_statfs(NULL, obd->obd_self_export, &stat_buf, + cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS), + 0); + if (rc != 0) + GOTO(out, rc); - if (cfs_copy_to_user(data->ioc_pbuf1, &stat_buf, + if (copy_to_user(data->ioc_pbuf1, &stat_buf, min((int) data->ioc_plen1, (int) sizeof(stat_buf)))) GOTO(out, rc = -EFAULT); @@ -1920,7 +1920,7 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, break; } case LL_IOC_GET_CONNECT_FLAGS: { - if (cfs_copy_to_user(uarg, + if (copy_to_user(uarg, exp_connect_flags_ptr(exp), sizeof(__u64))) GOTO(out, rc = -EFAULT); diff --git a/lustre/mdd/mdd_device.c b/lustre/mdd/mdd_device.c index 37f6508..c327c9b 100644 --- a/lustre/mdd/mdd_device.c +++ b/lustre/mdd/mdd_device.c @@ -59,7 +59,7 @@ static const char mdd_root_dir_name[] = "ROOT"; static const char mdd_obf_dir_name[] = "fid"; /* Slab for MDD object allocation */ -cfs_mem_cache_t *mdd_object_kmem; +struct kmem_cache *mdd_object_kmem; static struct lu_kmem_descr mdd_caches[] = { { diff --git a/lustre/mdd/mdd_dir.c b/lustre/mdd/mdd_dir.c index 9f3c333..27db0a7 100644 --- a/lustre/mdd/mdd_dir.c +++ b/lustre/mdd/mdd_dir.c @@ -1050,7 +1050,7 @@ int mdd_links_read(const struct lu_env *env, struct mdd_object *mdd_obj, /* First try a small buf */ LASSERT(env != NULL); ldata->ld_buf = lu_buf_check_and_alloc(&mdd_env_info(env)->mti_link_buf, - CFS_PAGE_SIZE); + PAGE_CACHE_SIZE); if (ldata->ld_buf->lb_buf == NULL) return -ENOMEM; diff --git a/lustre/mdd/mdd_lproc.c b/lustre/mdd/mdd_lproc.c index b3259d3..bd23302 100644 --- a/lustre/mdd/mdd_lproc.c +++ b/lustre/mdd/mdd_lproc.c @@ -103,7 +103,7 @@ static int lprocfs_wr_atime_diff(struct file *file, const char *buffer, if (count > (sizeof(kernbuf) - 1)) return -EINVAL; - if (cfs_copy_from_user(kernbuf, buffer, count)) + if (copy_from_user(kernbuf, buffer, count)) return -EFAULT; kernbuf[count] = '\0'; @@ -144,29 +144,29 @@ static int lprocfs_rd_changelog_mask(char *page, char **start, off_t off, } static int lprocfs_wr_changelog_mask(struct file *file, const char *buffer, - unsigned long count, void *data) + unsigned long count, void *data) { - struct mdd_device *mdd = data; - char *kernbuf; - 
int rc; - ENTRY; - - if (count >= CFS_PAGE_SIZE) - RETURN(-EINVAL); - OBD_ALLOC(kernbuf, CFS_PAGE_SIZE); - if (kernbuf == NULL) - RETURN(-ENOMEM); - if (cfs_copy_from_user(kernbuf, buffer, count)) - GOTO(out, rc = -EFAULT); - kernbuf[count] = 0; - - rc = cfs_str2mask(kernbuf, changelog_type2str, &mdd->mdd_cl.mc_mask, - CHANGELOG_MINMASK, CHANGELOG_ALLMASK); - if (rc == 0) - rc = count; + struct mdd_device *mdd = data; + char *kernbuf; + int rc; + ENTRY; + + if (count >= PAGE_CACHE_SIZE) + RETURN(-EINVAL); + OBD_ALLOC(kernbuf, PAGE_CACHE_SIZE); + if (kernbuf == NULL) + RETURN(-ENOMEM); + if (copy_from_user(kernbuf, buffer, count)) + GOTO(out, rc = -EFAULT); + kernbuf[count] = 0; + + rc = cfs_str2mask(kernbuf, changelog_type2str, &mdd->mdd_cl.mc_mask, + CHANGELOG_MINMASK, CHANGELOG_ALLMASK); + if (rc == 0) + rc = count; out: - OBD_FREE(kernbuf, CFS_PAGE_SIZE); - return rc; + OBD_FREE(kernbuf, PAGE_CACHE_SIZE); + return rc; } struct cucb_data { diff --git a/lustre/mdd/mdd_object.c b/lustre/mdd/mdd_object.c index 42f514d..b4f16b3 100644 --- a/lustre/mdd/mdd_object.c +++ b/lustre/mdd/mdd_object.c @@ -58,7 +58,7 @@ #include "mdd_internal.h" static const struct lu_object_operations mdd_lu_obj_ops; -extern cfs_mem_cache_t *mdd_object_kmem; +extern struct kmem_cache *mdd_object_kmem; static int mdd_xattr_get(const struct lu_env *env, struct md_object *obj, struct lu_buf *buf, @@ -145,7 +145,7 @@ struct lu_object *mdd_object_alloc(const struct lu_env *env, { struct mdd_object *mdd_obj; - OBD_SLAB_ALLOC_PTR_GFP(mdd_obj, mdd_object_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(mdd_obj, mdd_object_kmem, __GFP_IO); if (mdd_obj != NULL) { struct lu_object *o; @@ -1895,12 +1895,12 @@ int mdd_readpage(const struct lu_env *env, struct md_object *obj, LASSERT(rdpg->rp_pages != NULL); pg = rdpg->rp_pages[0]; - dp = (struct lu_dirpage*)cfs_kmap(pg); + dp = (struct lu_dirpage *)kmap(pg); memset(dp, 0 , sizeof(struct lu_dirpage)); dp->ldp_hash_start = cpu_to_le64(rdpg->rp_hash); dp->ldp_hash_end = cpu_to_le64(MDS_DIR_END_OFF); dp->ldp_flags = cpu_to_le32(LDF_EMPTY); - cfs_kunmap(pg); + kunmap(pg); GOTO(out_unlock, rc = LU_PAGE_SIZE); } @@ -1909,7 +1909,7 @@ int mdd_readpage(const struct lu_env *env, struct md_object *obj, if (rc >= 0) { struct lu_dirpage *dp; - dp = cfs_kmap(rdpg->rp_pages[0]); + dp = kmap(rdpg->rp_pages[0]); dp->ldp_hash_start = cpu_to_le64(rdpg->rp_hash); if (rc == 0) { /* @@ -1919,7 +1919,7 @@ int mdd_readpage(const struct lu_env *env, struct md_object *obj, dp->ldp_flags = cpu_to_le32(LDF_EMPTY); rc = min_t(unsigned int, LU_PAGE_SIZE, rdpg->rp_count); } - cfs_kunmap(rdpg->rp_pages[0]); + kunmap(rdpg->rp_pages[0]); } GOTO(out_unlock, rc); diff --git a/lustre/mdt/mdt_handler.c b/lustre/mdt/mdt_handler.c index 78d994c..d6c94f8 100644 --- a/lustre/mdt/mdt_handler.c +++ b/lustre/mdt/mdt_handler.c @@ -99,7 +99,7 @@ static int mdt_unpack_req_pack_rep(struct mdt_thread_info *info, __u32 flags); static const struct lu_object_operations mdt_obj_ops; /* Slab for MDT object allocation */ -static cfs_mem_cache_t *mdt_object_kmem; +static struct kmem_cache *mdt_object_kmem; static struct lu_kmem_descr mdt_caches[] = { { @@ -793,7 +793,7 @@ static int mdt_getattr_internal(struct mdt_thread_info *info, PFID(mdt_object_fid(o)), rc); rc = -EFAULT; } else { - int print_limit = min_t(int, CFS_PAGE_SIZE - 128, rc); + int print_limit = min_t(int, PAGE_CACHE_SIZE - 128, rc); if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO)) rc -= 2; @@ -1634,7 +1634,7 @@ static int mdt_sendpage(struct mdt_thread_info *info, for (i 
= 0, tmpcount = nob; i < rdpg->rp_npages && tmpcount > 0; i++, tmpcount -= tmpsize) { - tmpsize = min_t(int, tmpcount, CFS_PAGE_SIZE); + tmpsize = min_t(int, tmpcount, PAGE_CACHE_SIZE); ptlrpc_prep_bulk_page_pin(desc, rdpg->rp_pages[i], 0, tmpsize); } @@ -1679,14 +1679,14 @@ int mdt_readpage(struct mdt_thread_info *info) rdpg->rp_attrs |= LUDA_64BITHASH; rdpg->rp_count = min_t(unsigned int, reqbody->nlink, exp_max_brw_size(info->mti_exp)); - rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE - 1) >> - CFS_PAGE_SHIFT; + rdpg->rp_npages = (rdpg->rp_count + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT; OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]); if (rdpg->rp_pages == NULL) RETURN(-ENOMEM); for (i = 0; i < rdpg->rp_npages; ++i) { - rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD); + rdpg->rp_pages[i] = alloc_page(GFP_IOFS); if (rdpg->rp_pages[i] == NULL) GOTO(free_rdpg, rc = -ENOMEM); } @@ -1704,7 +1704,7 @@ free_rdpg: for (i = 0; i < rdpg->rp_npages; i++) if (rdpg->rp_pages[i] != NULL) - cfs_free_page(rdpg->rp_pages[i]); + __free_page(rdpg->rp_pages[i]); OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof rdpg->rp_pages[0]); if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE)) @@ -2117,14 +2117,15 @@ int mdt_obd_idx_read(struct mdt_thread_info *info) GOTO(out, rc = -EFAULT); rdpg->rp_count = min_t(unsigned int, req_ii->ii_count << LU_PAGE_SHIFT, exp_max_brw_size(info->mti_exp)); - rdpg->rp_npages = (rdpg->rp_count + CFS_PAGE_SIZE -1) >> CFS_PAGE_SHIFT; + rdpg->rp_npages = (rdpg->rp_count + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT; /* allocate pages to store the containers */ OBD_ALLOC(rdpg->rp_pages, rdpg->rp_npages * sizeof(rdpg->rp_pages[0])); if (rdpg->rp_pages == NULL) GOTO(out, rc = -ENOMEM); for (i = 0; i < rdpg->rp_npages; i++) { - rdpg->rp_pages[i] = cfs_alloc_page(CFS_ALLOC_STD); + rdpg->rp_pages[i] = alloc_page(GFP_IOFS); if (rdpg->rp_pages[i] == NULL) GOTO(out, rc = -ENOMEM); } @@ -2145,7 +2146,7 @@ out: if (rdpg->rp_pages) { for (i = 0; i < rdpg->rp_npages; i++) if (rdpg->rp_pages[i]) - cfs_free_page(rdpg->rp_pages[i]); + __free_page(rdpg->rp_pages[i]); OBD_FREE(rdpg->rp_pages, rdpg->rp_npages * sizeof(rdpg->rp_pages[0])); } @@ -4996,7 +4997,7 @@ static struct lu_object *mdt_object_alloc(const struct lu_env *env, ENTRY; - OBD_SLAB_ALLOC_PTR_GFP(mo, mdt_object_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(mo, mdt_object_kmem, __GFP_IO); if (mo != NULL) { struct lu_object *o; struct lu_object_header *h; diff --git a/lustre/mdt/mdt_lproc.c b/lustre/mdt/mdt_lproc.c index 1d4b483..f83366b 100644 --- a/lustre/mdt/mdt_lproc.c +++ b/lustre/mdt/mdt_lproc.c @@ -355,7 +355,7 @@ static int lprocfs_wr_identity_upcall(struct file *file, const char *buffer, OBD_ALLOC(kernbuf, count + 1); if (kernbuf == NULL) GOTO(failed, rc = -ENOMEM); - if (cfs_copy_from_user(kernbuf, buffer, count)) + if (copy_from_user(kernbuf, buffer, count)) GOTO(failed, rc = -EFAULT); /* Remove any extraneous bits from the upcall (e.g. linefeeds) */ @@ -416,7 +416,7 @@ again: if (param == NULL) return -ENOMEM; - if (cfs_copy_from_user(param, buffer, size)) { + if (copy_from_user(param, buffer, size)) { CERROR("%s: bad identity data\n", mdt_obd_name(mdt)); GOTO(out, rc = -EFAULT); } @@ -597,8 +597,8 @@ static int lprocfs_mdt_wr_evict_client(struct file *file, const char *buffer, * bytes into kbuf, to ensure that the string is NUL-terminated. * UUID_MAX should include a trailing NUL already. 
*/ - if (cfs_copy_from_user(kbuf, buffer, - min_t(unsigned long, BUFLEN - 1, count))) { + if (copy_from_user(kbuf, buffer, + min_t(unsigned long, BUFLEN - 1, count))) { count = -EFAULT; goto out; } @@ -714,7 +714,7 @@ static int lprocfs_wr_root_squash(struct file *file, const char *buffer, errmsg = "string too long"; GOTO(failed, rc = -EINVAL); } - if (cfs_copy_from_user(kernbuf, buffer, count)) { + if (copy_from_user(kernbuf, buffer, count)) { errmsg = "bad address"; GOTO(failed, rc = -EFAULT); } @@ -783,7 +783,7 @@ static int lprocfs_wr_nosquash_nids(struct file *file, const char *buffer, errmsg = "no memory"; GOTO(failed, rc = -ENOMEM); } - if (cfs_copy_from_user(kernbuf, buffer, count)) { + if (copy_from_user(kernbuf, buffer, count)) { errmsg = "bad address"; GOTO(failed, rc = -EFAULT); } @@ -856,7 +856,7 @@ static int lprocfs_wr_mdt_som(struct file *file, const char *buffer, if (count > (sizeof(kernbuf) - 1)) return -EINVAL; - if (cfs_copy_from_user(kernbuf, buffer, count)) + if (copy_from_user(kernbuf, buffer, count)) return -EFAULT; kernbuf[count] = '\0'; @@ -913,8 +913,8 @@ static int lprocfs_mdt_wr_mdc(struct file *file, const char *buffer, * bytes into kbuf, to ensure that the string is NUL-terminated. * UUID_MAX should include a trailing NUL already. */ - if (cfs_copy_from_user(kbuf, buffer, - min_t(unsigned long, UUID_MAX - 1, count))) { + if (copy_from_user(kbuf, buffer, + min_t(unsigned long, UUID_MAX - 1, count))) { count = -EFAULT; goto out; } diff --git a/lustre/mgc/mgc_request.c b/lustre/mgc/mgc_request.c index da798ae..e024b9d 100644 --- a/lustre/mgc/mgc_request.c +++ b/lustre/mgc/mgc_request.c @@ -1234,7 +1234,7 @@ static int mgc_llog_finish(struct obd_device *obd, int count) } enum { - CONFIG_READ_NRPAGES_INIT = 1 << (20 - CFS_PAGE_SHIFT), + CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT), CONFIG_READ_NRPAGES = 4 }; @@ -1260,22 +1260,22 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, LASSERT(cfg->cfg_instance != NULL); LASSERT(cfg->cfg_sb == cfg->cfg_instance); - OBD_ALLOC(inst, CFS_PAGE_SIZE); - if (inst == NULL) - RETURN(-ENOMEM); + OBD_ALLOC(inst, PAGE_CACHE_SIZE); + if (inst == NULL) + RETURN(-ENOMEM); if (!IS_SERVER(lsi)) { - pos = snprintf(inst, CFS_PAGE_SIZE, "%p", cfg->cfg_instance); - if (pos >= CFS_PAGE_SIZE) { - OBD_FREE(inst, CFS_PAGE_SIZE); + pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance); + if (pos >= PAGE_CACHE_SIZE) { + OBD_FREE(inst, PAGE_CACHE_SIZE); return -E2BIG; } } else { LASSERT(IS_MDT(lsi)); rc = server_name2svname(lsi->lsi_svname, inst, NULL, - CFS_PAGE_SIZE); + PAGE_CACHE_SIZE); if (rc) { - OBD_FREE(inst, CFS_PAGE_SIZE); + OBD_FREE(inst, PAGE_CACHE_SIZE); RETURN(-EINVAL); } pos = strlen(inst); @@ -1283,7 +1283,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, ++pos; buf = inst + pos; - bufsz = CFS_PAGE_SIZE - pos; + bufsz = PAGE_CACHE_SIZE - pos; while (datalen > 0) { int entry_len = sizeof(*entry); @@ -1315,7 +1315,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, /* Keep this swab for normal mixed endian handling. 
LU-1644 */ if (mne_swab) lustre_swab_mgs_nidtbl_entry(entry); - if (entry->mne_length > CFS_PAGE_SIZE) { + if (entry->mne_length > PAGE_CACHE_SIZE) { CERROR("MNE too large (%u)\n", entry->mne_length); break; } @@ -1433,7 +1433,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, /* continue, even one with error */ } - OBD_FREE(inst, CFS_PAGE_SIZE); + OBD_FREE(inst, PAGE_CACHE_SIZE); RETURN(rc); } @@ -1449,7 +1449,7 @@ static int mgc_process_recover_log(struct obd_device *obd, struct mgs_config_body *body; struct mgs_config_res *res; struct ptlrpc_bulk_desc *desc; - cfs_page_t **pages; + struct page **pages; int nrpages; bool eof = true; bool mne_swab = false; @@ -1473,7 +1473,7 @@ static int mgc_process_recover_log(struct obd_device *obd, GOTO(out, rc = -ENOMEM); for (i = 0; i < nrpages; i++) { - pages[i] = cfs_alloc_page(CFS_ALLOC_STD); + pages[i] = alloc_page(GFP_IOFS); if (pages[i] == NULL) GOTO(out, rc = -ENOMEM); } @@ -1499,7 +1499,7 @@ again: GOTO(out, rc = -E2BIG); body->mcb_offset = cfg->cfg_last_idx + 1; body->mcb_type = cld->cld_type; - body->mcb_bits = CFS_PAGE_SHIFT; + body->mcb_bits = PAGE_CACHE_SHIFT; body->mcb_units = nrpages; /* allocate bulk transfer descriptor */ @@ -1509,7 +1509,7 @@ again: GOTO(out, rc = -ENOMEM); for (i = 0; i < nrpages; i++) - ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, CFS_PAGE_SIZE); + ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE); ptlrpc_request_set_replen(req); rc = ptlrpc_queue_wait(req); @@ -1532,7 +1532,7 @@ again: if (ealen < 0) GOTO(out, rc = ealen); - if (ealen > nrpages << CFS_PAGE_SHIFT) + if (ealen > nrpages << PAGE_CACHE_SHIFT) GOTO(out, rc = -EINVAL); if (ealen == 0) { /* no logs transferred */ @@ -1555,18 +1555,18 @@ again: int rc2; void *ptr; - ptr = cfs_kmap(pages[i]); - rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr, - min_t(int, ealen, CFS_PAGE_SIZE), + ptr = kmap(pages[i]); + rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr, + min_t(int, ealen, PAGE_CACHE_SIZE), mne_swab); - cfs_kunmap(pages[i]); - if (rc2 < 0) { - CWARN("Process recover log %s error %d\n", - cld->cld_logname, rc2); - break; + kunmap(pages[i]); + if (rc2 < 0) { + CWARN("Process recover log %s error %d\n", + cld->cld_logname, rc2); + break; } - ealen -= CFS_PAGE_SIZE; + ealen -= PAGE_CACHE_SIZE; } out: @@ -1576,15 +1576,15 @@ out: if (rc == 0 && !eof) goto again; - if (pages) { - for (i = 0; i < nrpages; i++) { - if (pages[i] == NULL) - break; - cfs_free_page(pages[i]); - } - OBD_FREE(pages, sizeof(*pages) * nrpages); - } - return rc; + if (pages) { + for (i = 0; i < nrpages; i++) { + if (pages[i] == NULL) + break; + __free_page(pages[i]); + } + OBD_FREE(pages, sizeof(*pages) * nrpages); + } + return rc; } #ifdef HAVE_LDISKFS_OSD diff --git a/lustre/mgs/mgs_handler.c b/lustre/mgs/mgs_handler.c index 3be0e76..cc600b4 100644 --- a/lustre/mgs/mgs_handler.c +++ b/lustre/mgs/mgs_handler.c @@ -837,14 +837,14 @@ static int mgs_iocontrol_pool(const struct lu_env *env, GOTO(out_pool, rc = -EINVAL); } - if (data->ioc_plen1 > CFS_PAGE_SIZE) + if (data->ioc_plen1 > PAGE_CACHE_SIZE) GOTO(out_pool, rc = -E2BIG); OBD_ALLOC(lcfg, data->ioc_plen1); if (lcfg == NULL) GOTO(out_pool, rc = -ENOMEM); - if (cfs_copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1)) + if (copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1)) GOTO(out_lcfg, rc = -EFAULT); if (lcfg->lcfg_bufcount < 2) @@ -929,7 +929,7 @@ int mgs_iocontrol(unsigned int cmd, struct obd_export *exp, int len, OBD_ALLOC(lcfg, data->ioc_plen1); if (lcfg == NULL) GOTO(out, rc = 
-ENOMEM); - if (cfs_copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1)) + if (copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1)) GOTO(out_free, rc = -EFAULT); if (lcfg->lcfg_bufcount < 1) diff --git a/lustre/mgs/mgs_nids.c b/lustre/mgs/mgs_nids.c index dc1ff04..4f46881 100644 --- a/lustre/mgs/mgs_nids.c +++ b/lustre/mgs/mgs_nids.c @@ -79,7 +79,7 @@ static int nidtbl_is_sane(struct mgs_nidtbl *tbl) * shouldn't cross unit boundaries. */ static int mgs_nidtbl_read(struct obd_export *exp, struct mgs_nidtbl *tbl, - struct mgs_config_res *res, cfs_page_t **pages, + struct mgs_config_res *res, struct page **pages, int nrpages, int units_total, int unit_size) { struct mgs_nidtbl_target *tgt; @@ -97,7 +97,7 @@ static int mgs_nidtbl_read(struct obd_export *exp, struct mgs_nidtbl *tbl, /* make sure unit_size is power 2 */ LASSERT((unit_size & (unit_size - 1)) == 0); - LASSERT(nrpages << CFS_PAGE_SHIFT >= units_total * unit_size); + LASSERT(nrpages << PAGE_CACHE_SHIFT >= units_total * unit_size); mutex_lock(&tbl->mn_lock); LASSERT(nidtbl_is_sane(tbl)); @@ -154,25 +154,25 @@ static int mgs_nidtbl_read(struct obd_export *exp, struct mgs_nidtbl *tbl, } LASSERT((rc & (unit_size - 1)) == 0); - if (units_in_page == 0) { - /* allocate a new page */ - pages[index] = cfs_alloc_page(CFS_ALLOC_STD); - if (pages[index] == NULL) { - rc = -ENOMEM; - break; - } + if (units_in_page == 0) { + /* allocate a new page */ + pages[index] = alloc_page(GFP_IOFS); + if (pages[index] == NULL) { + rc = -ENOMEM; + break; + } - /* destroy previous map */ - if (index > 0) - cfs_kunmap(pages[index - 1]); + /* destroy previous map */ + if (index > 0) + kunmap(pages[index - 1]); - /* reassign buffer */ - buf = cfs_kmap(pages[index]); - ++index; + /* reassign buffer */ + buf = kmap(pages[index]); + ++index; - units_in_page = CFS_PAGE_SIZE / unit_size; - LASSERT(units_in_page > 0); - } + units_in_page = PAGE_CACHE_SIZE / unit_size; + LASSERT(units_in_page > 0); + } /* allocate an unit */ LASSERT(((long)buf & (unit_size - 1)) == 0); @@ -212,7 +212,7 @@ static int mgs_nidtbl_read(struct obd_export *exp, struct mgs_nidtbl *tbl, bytes_in_unit, index, nrpages, units_total); } if (index > 0) - cfs_kunmap(pages[index - 1]); + kunmap(pages[index - 1]); out: LASSERT(version <= tbl->mn_version); res->mcr_size = tbl->mn_version; @@ -628,7 +628,7 @@ int mgs_get_ir_logs(struct ptlrpc_request *req) int bytes; int page_count; int nrpages; - cfs_page_t **pages = NULL; + struct page **pages = NULL; ENTRY; body = req_capsule_client_get(&req->rq_pill, &RMF_MGS_CONFIG_BODY); @@ -647,7 +647,7 @@ int mgs_get_ir_logs(struct ptlrpc_request *req) RETURN(rc); bufsize = body->mcb_units << body->mcb_bits; - nrpages = (bufsize + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT; + nrpages = (bufsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; if (nrpages > PTLRPC_MAX_BRW_PAGES) RETURN(-EINVAL); @@ -667,14 +667,14 @@ int mgs_get_ir_logs(struct ptlrpc_request *req) GOTO(out, rc = -EINVAL); res->mcr_offset = body->mcb_offset; - unit_size = min_t(int, 1 << body->mcb_bits, CFS_PAGE_SIZE); + unit_size = min_t(int, 1 << body->mcb_bits, PAGE_CACHE_SIZE); bytes = mgs_nidtbl_read(req->rq_export, &fsdb->fsdb_nidtbl, res, pages, nrpages, bufsize / unit_size, unit_size); if (bytes < 0) GOTO(out, rc = bytes); /* start bulk transfer */ - page_count = (bytes + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT; + page_count = (bytes + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; LASSERT(page_count <= nrpages); desc = ptlrpc_prep_bulk_exp(req, page_count, 1, BULK_PUT_SOURCE, MGS_BULK_PORTAL); @@ 
-683,8 +683,8 @@ int mgs_get_ir_logs(struct ptlrpc_request *req) for (i = 0; i < page_count && bytes > 0; i++) { ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, - min_t(int, bytes, CFS_PAGE_SIZE)); - bytes -= CFS_PAGE_SIZE; + min_t(int, bytes, PAGE_CACHE_SIZE)); + bytes -= PAGE_CACHE_SIZE; } rc = target_bulk_io(req->rq_export, desc, &lwi); @@ -694,7 +694,7 @@ out: for (i = 0; i < nrpages; i++) { if (pages[i] == NULL) break; - cfs_free_page(pages[i]); + __free_page(pages[i]); } OBD_FREE(pages, sizeof(*pages) * nrpages); return rc; @@ -760,7 +760,7 @@ int lprocfs_wr_ir_state(struct file *file, const char *buffer, char *ptr; int rc = 0; - if (count > CFS_PAGE_SIZE) + if (count > PAGE_CACHE_SIZE) return -EINVAL; OBD_ALLOC(kbuf, count + 1); diff --git a/lustre/obdclass/capa.c b/lustre/obdclass/capa.c index 02fb53d..47f52d0 100644 --- a/lustre/obdclass/capa.c +++ b/lustre/obdclass/capa.c @@ -63,7 +63,7 @@ #define NR_CAPAHASH 32 #define CAPA_HASH_SIZE 3000 /* for MDS & OSS */ -cfs_mem_cache_t *capa_cachep = NULL; +struct kmem_cache *capa_cachep; #ifdef __KERNEL__ /* lock for capa hash/capa_list/fo_capa_keys */ @@ -85,19 +85,19 @@ EXPORT_SYMBOL(capa_count); cfs_hlist_head_t *init_capa_hash(void) { - cfs_hlist_head_t *hash; - int nr_hash, i; + cfs_hlist_head_t *hash; + int nr_hash, i; - OBD_ALLOC(hash, CFS_PAGE_SIZE); - if (!hash) - return NULL; + OBD_ALLOC(hash, PAGE_CACHE_SIZE); + if (!hash) + return NULL; - nr_hash = CFS_PAGE_SIZE / sizeof(cfs_hlist_head_t); - LASSERT(nr_hash > NR_CAPAHASH); + nr_hash = PAGE_CACHE_SIZE / sizeof(cfs_hlist_head_t); + LASSERT(nr_hash > NR_CAPAHASH); - for (i = 0; i < NR_CAPAHASH; i++) - CFS_INIT_HLIST_HEAD(hash + i); - return hash; + for (i = 0; i < NR_CAPAHASH; i++) + CFS_INIT_HLIST_HEAD(hash + i); + return hash; } EXPORT_SYMBOL(init_capa_hash); @@ -131,7 +131,7 @@ void cleanup_capa_hash(cfs_hlist_head_t *hash) } spin_unlock(&capa_lock); - OBD_FREE(hash, CFS_PAGE_SIZE); + OBD_FREE(hash, PAGE_CACHE_SIZE); } EXPORT_SYMBOL(cleanup_capa_hash); @@ -267,7 +267,7 @@ int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key) sg_set_page(&sl, virt_to_page(capa), offsetof(struct lustre_capa, lc_hmac), - (unsigned long)(capa) % CFS_PAGE_SIZE); + (unsigned long)(capa) % PAGE_CACHE_SIZE); ll_crypto_hmac(tfm, key, &keylen, &sl, sl.length, hmac); ll_crypto_free_hash(tfm); @@ -307,11 +307,11 @@ int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen) GOTO(out, rc); } - sg_set_page(&sd, virt_to_page(d), 16, - (unsigned long)(d) % CFS_PAGE_SIZE); + sg_set_page(&sd, virt_to_page(d), 16, + (unsigned long)(d) % PAGE_CACHE_SIZE); - sg_set_page(&ss, virt_to_page(s), 16, - (unsigned long)(s) % CFS_PAGE_SIZE); + sg_set_page(&ss, virt_to_page(s), 16, + (unsigned long)(s) % PAGE_CACHE_SIZE); desc.tfm = tfm; desc.info = NULL; desc.flags = 0; @@ -360,11 +360,11 @@ int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen) GOTO(out, rc); } - sg_set_page(&sd, virt_to_page(d), 16, - (unsigned long)(d) % CFS_PAGE_SIZE); + sg_set_page(&sd, virt_to_page(d), 16, + (unsigned long)(d) % PAGE_CACHE_SIZE); - sg_set_page(&ss, virt_to_page(s), 16, - (unsigned long)(s) % CFS_PAGE_SIZE); + sg_set_page(&ss, virt_to_page(s), 16, + (unsigned long)(s) % PAGE_CACHE_SIZE); desc.tfm = tfm; desc.info = NULL; diff --git a/lustre/obdclass/cl_lock.c b/lustre/obdclass/cl_lock.c index fde2be4..21a5306 100644 --- a/lustre/obdclass/cl_lock.c +++ b/lustre/obdclass/cl_lock.c @@ -49,7 +49,7 @@ /** Lock class of cl_lock::cll_guard */ static struct lock_class_key cl_lock_guard_class; -static cfs_mem_cache_t 
*cl_lock_kmem; +static struct kmem_cache *cl_lock_kmem; static struct lu_kmem_descr cl_lock_caches[] = { { @@ -384,7 +384,7 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env, struct lu_object_header *head; ENTRY; - OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, __GFP_IO); if (lock != NULL) { cfs_atomic_set(&lock->cll_ref, 1); lock->cll_descr = *descr; diff --git a/lustre/obdclass/cl_object.c b/lustre/obdclass/cl_object.c index 9fae7a9..fd036aa 100644 --- a/lustre/obdclass/cl_object.c +++ b/lustre/obdclass/cl_object.c @@ -61,7 +61,7 @@ #include #include "cl_internal.h" -static cfs_mem_cache_t *cl_env_kmem; +static struct kmem_cache *cl_env_kmem; /** Lock class of cl_object_header::coh_page_guard */ static struct lock_class_key cl_page_guard_class; @@ -749,7 +749,7 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug) struct lu_env *env; struct cl_env *cle; - OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(cle, cl_env_kmem, __GFP_IO); if (cle != NULL) { int rc; diff --git a/lustre/obdclass/cl_page.c b/lustre/obdclass/cl_page.c index cfa1f60..b7dfab1 100644 --- a/lustre/obdclass/cl_page.c +++ b/lustre/obdclass/cl_page.c @@ -323,7 +323,7 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env, ENTRY; OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize, - CFS_ALLOC_IO); + __GFP_IO); if (page != NULL) { int result = 0; cfs_atomic_set(&page->cp_ref, 1); @@ -655,7 +655,7 @@ EXPORT_SYMBOL(cl_page_put); /** * Returns a VM page associated with a given cl_page. */ -cfs_page_t *cl_page_vmpage(const struct lu_env *env, struct cl_page *page) +struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page) { const struct cl_page_slice *slice; @@ -678,7 +678,7 @@ EXPORT_SYMBOL(cl_page_vmpage); /** * Returns a cl_page associated with a VM page, and given cl_object. */ -struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj) +struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj) { struct cl_page *top; struct cl_page *page; @@ -1575,10 +1575,7 @@ EXPORT_SYMBOL(cl_page_cancel); */ loff_t cl_offset(const struct cl_object *obj, pgoff_t idx) { - /* - * XXX for now. - */ - return (loff_t)idx << CFS_PAGE_SHIFT; + return (loff_t)idx << PAGE_CACHE_SHIFT; } EXPORT_SYMBOL(cl_offset); @@ -1587,16 +1584,13 @@ EXPORT_SYMBOL(cl_offset); */ pgoff_t cl_index(const struct cl_object *obj, loff_t offset) { - /* - * XXX for now. 
- */ - return offset >> CFS_PAGE_SHIFT; + return offset >> PAGE_CACHE_SHIFT; } EXPORT_SYMBOL(cl_index); int cl_page_size(const struct cl_object *obj) { - return 1 << CFS_PAGE_SHIFT; + return 1 << PAGE_CACHE_SHIFT; } EXPORT_SYMBOL(cl_page_size); diff --git a/lustre/obdclass/class_obd.c b/lustre/obdclass/class_obd.c index 2747e10..6e4a56b 100644 --- a/lustre/obdclass/class_obd.c +++ b/lustre/obdclass/class_obd.c @@ -182,7 +182,7 @@ int obd_alloc_fail(const void *ptr, const char *name, const char *type, "("LPU64" bytes) allocated by Lustre, " "%d total bytes by LNET\n", obd_memory_sum(), - obd_pages_sum() << CFS_PAGE_SHIFT, + obd_pages_sum() << PAGE_CACHE_SHIFT, obd_pages_sum(), cfs_atomic_read(&libcfs_kmemory)); return 1; @@ -268,7 +268,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg) OBD_ALLOC(lcfg, data->ioc_plen1); if (lcfg == NULL) GOTO(out, err = -ENOMEM); - err = cfs_copy_from_user(lcfg, data->ioc_pbuf1, + err = copy_from_user(lcfg, data->ioc_pbuf1, data->ioc_plen1); if (!err) err = lustre_cfg_sanity_check(lcfg, data->ioc_plen1); @@ -514,9 +514,9 @@ int obd_init_checks(void) CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len); ret = -EINVAL; } - if ((u64val & ~CFS_PAGE_MASK) >= CFS_PAGE_SIZE) { + if ((u64val & ~CFS_PAGE_MASK) >= PAGE_CACHE_SIZE) { CWARN("mask failed: u64val "LPU64" >= "LPU64"\n", u64val, - (__u64)CFS_PAGE_SIZE); + (__u64)PAGE_CACHE_SIZE); ret = -EINVAL; } @@ -588,10 +588,10 @@ int init_obdclass(void) /* Default the dirty page cache cap to 1/2 of system memory. * For clients with less memory, a larger fraction is needed * for other purposes (mostly for BGL). */ - if (cfs_num_physpages <= 512 << (20 - CFS_PAGE_SHIFT)) - obd_max_dirty_pages = cfs_num_physpages / 4; - else - obd_max_dirty_pages = cfs_num_physpages / 2; + if (num_physpages <= 512 << (20 - PAGE_CACHE_SHIFT)) + obd_max_dirty_pages = num_physpages / 4; + else + obd_max_dirty_pages = num_physpages / 2; err = obd_init_caches(); if (err) diff --git a/lustre/obdclass/debug.c b/lustre/obdclass/debug.c index 93467e6..055fe0e 100644 --- a/lustre/obdclass/debug.c +++ b/lustre/obdclass/debug.c @@ -55,7 +55,7 @@ void dump_lniobuf(struct niobuf_local *nb) "niobuf_local: file_offset="LPD64", len=%d, page=%p, rc=%d\n", nb->lnb_file_offset, nb->len, nb->page, nb->rc); CDEBUG(D_RPCTRACE, "nb->page: index = %ld\n", - nb->page ? cfs_page_index(nb->page) : -1); + nb->page ? 
page_index(nb->page) : -1); } EXPORT_SYMBOL(dump_lniobuf); diff --git a/lustre/obdclass/dt_object.c b/lustre/obdclass/dt_object.c index 275674b..deb5863 100644 --- a/lustre/obdclass/dt_object.c +++ b/lustre/obdclass/dt_object.c @@ -808,7 +808,7 @@ int dt_index_walk(const struct lu_env *env, struct dt_object *obj, int i; LASSERT(pageidx < rdpg->rp_npages); - lp = cfs_kmap(rdpg->rp_pages[pageidx]); + lp = kmap(rdpg->rp_pages[pageidx]); /* fill lu pages */ for (i = 0; i < LU_PAGE_COUNT; i++, lp++, nob -= LU_PAGE_SIZE) { @@ -822,7 +822,7 @@ int dt_index_walk(const struct lu_env *env, struct dt_object *obj, /* end of index */ break; } - cfs_kunmap(rdpg->rp_pages[i]); + kunmap(rdpg->rp_pages[i]); } iops->put(env, it); diff --git a/lustre/obdclass/genops.c b/lustre/obdclass/genops.c index 6900bbc..f8c2b93 100644 --- a/lustre/obdclass/genops.c +++ b/lustre/obdclass/genops.c @@ -50,10 +50,10 @@ extern cfs_list_t obd_types; spinlock_t obd_types_lock; -cfs_mem_cache_t *obd_device_cachep; -cfs_mem_cache_t *obdo_cachep; +struct kmem_cache *obd_device_cachep; +struct kmem_cache *obdo_cachep; EXPORT_SYMBOL(obdo_cachep); -cfs_mem_cache_t *import_cachep; +struct kmem_cache *import_cachep; cfs_list_t obd_zombie_imports; cfs_list_t obd_zombie_exports; @@ -73,13 +73,13 @@ EXPORT_SYMBOL(ptlrpc_put_connection_superhack); */ static struct obd_device *obd_device_alloc(void) { - struct obd_device *obd; + struct obd_device *obd; - OBD_SLAB_ALLOC_PTR_GFP(obd, obd_device_cachep, CFS_ALLOC_IO); - if (obd != NULL) { - obd->obd_magic = OBD_DEVICE_MAGIC; - } - return obd; + OBD_SLAB_ALLOC_PTR_GFP(obd, obd_device_cachep, __GFP_IO); + if (obd != NULL) { + obd->obd_magic = OBD_DEVICE_MAGIC; + } + return obd; } static void obd_device_free(struct obd_device *obd) @@ -642,27 +642,21 @@ EXPORT_SYMBOL(class_notify_sptlrpc_conf); void obd_cleanup_caches(void) { - int rc; - ENTRY; if (obd_device_cachep) { - rc = cfs_mem_cache_destroy(obd_device_cachep); - LASSERTF(rc == 0, "Cannot destropy ll_obd_device_cache: rc %d\n", rc); + kmem_cache_destroy(obd_device_cachep); obd_device_cachep = NULL; } if (obdo_cachep) { - rc = cfs_mem_cache_destroy(obdo_cachep); - LASSERTF(rc == 0, "Cannot destory ll_obdo_cache\n"); + kmem_cache_destroy(obdo_cachep); obdo_cachep = NULL; } if (import_cachep) { - rc = cfs_mem_cache_destroy(import_cachep); - LASSERTF(rc == 0, "Cannot destory ll_import_cache\n"); + kmem_cache_destroy(import_cachep); import_cachep = NULL; } if (capa_cachep) { - rc = cfs_mem_cache_destroy(capa_cachep); - LASSERTF(rc == 0, "Cannot destory capa_cache\n"); + kmem_cache_destroy(capa_cachep); capa_cachep = NULL; } EXIT; @@ -670,38 +664,38 @@ void obd_cleanup_caches(void) int obd_init_caches(void) { - ENTRY; + ENTRY; - LASSERT(obd_device_cachep == NULL); - obd_device_cachep = cfs_mem_cache_create("ll_obd_dev_cache", - sizeof(struct obd_device), - 0, 0); - if (!obd_device_cachep) - GOTO(out, -ENOMEM); - - LASSERT(obdo_cachep == NULL); - obdo_cachep = cfs_mem_cache_create("ll_obdo_cache", sizeof(struct obdo), - 0, 0); - if (!obdo_cachep) - GOTO(out, -ENOMEM); - - LASSERT(import_cachep == NULL); - import_cachep = cfs_mem_cache_create("ll_import_cache", - sizeof(struct obd_import), - 0, 0); - if (!import_cachep) - GOTO(out, -ENOMEM); - - LASSERT(capa_cachep == NULL); - capa_cachep = cfs_mem_cache_create("capa_cache", - sizeof(struct obd_capa), 0, 0); - if (!capa_cachep) - GOTO(out, -ENOMEM); + LASSERT(obd_device_cachep == NULL); + obd_device_cachep = kmem_cache_create("ll_obd_dev_cache", + sizeof(struct obd_device), + 0, 0, NULL); + if 
(!obd_device_cachep) + GOTO(out, -ENOMEM); + + LASSERT(obdo_cachep == NULL); + obdo_cachep = kmem_cache_create("ll_obdo_cache", sizeof(struct obdo), + 0, 0, NULL); + if (!obdo_cachep) + GOTO(out, -ENOMEM); + + LASSERT(import_cachep == NULL); + import_cachep = kmem_cache_create("ll_import_cache", + sizeof(struct obd_import), + 0, 0, NULL); + if (!import_cachep) + GOTO(out, -ENOMEM); + + LASSERT(capa_cachep == NULL); + capa_cachep = kmem_cache_create("capa_cache", sizeof(struct obd_capa), + 0, 0, NULL); + if (!capa_cachep) + GOTO(out, -ENOMEM); - RETURN(0); - out: - obd_cleanup_caches(); - RETURN(-ENOMEM); + RETURN(0); +out: + obd_cleanup_caches(); + RETURN(-ENOMEM); } diff --git a/lustre/obdclass/linkea.c b/lustre/obdclass/linkea.c index 8fd9076..eed6fd2 100644 --- a/lustre/obdclass/linkea.c +++ b/lustre/obdclass/linkea.c @@ -33,7 +33,7 @@ int linkea_data_new(struct linkea_data *ldata, struct lu_buf *buf) { - ldata->ld_buf = lu_buf_check_and_alloc(buf, CFS_PAGE_SIZE); + ldata->ld_buf = lu_buf_check_and_alloc(buf, PAGE_CACHE_SIZE); if (ldata->ld_buf->lb_buf == NULL) return -ENOMEM; ldata->ld_leh = ldata->ld_buf->lb_buf; diff --git a/lustre/obdclass/linux/linux-module.c b/lustre/obdclass/linux/linux-module.c index 0648e9a..cc7c989 100644 --- a/lustre/obdclass/linux/linux-module.c +++ b/lustre/obdclass/linux/linux-module.c @@ -90,7 +90,7 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg) int offset = 0; ENTRY; - err = cfs_copy_from_user(&hdr, (void *)arg, sizeof(hdr)); + err = copy_from_user(&hdr, (void *)arg, sizeof(hdr)); if ( err ) RETURN(err); @@ -124,7 +124,7 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg) *len = hdr.ioc_len; data = (struct obd_ioctl_data *)*buf; - err = cfs_copy_from_user(*buf, (void *)arg, hdr.ioc_len); + err = copy_from_user(*buf, (void *)arg, hdr.ioc_len); if ( err ) { OBD_FREE_LARGE(*buf, hdr.ioc_len); RETURN(err); @@ -162,12 +162,12 @@ EXPORT_SYMBOL(obd_ioctl_getdata); int obd_ioctl_popdata(void *arg, void *data, int len) { - int err; + int err; - err = cfs_copy_to_user(arg, data, len); - if (err) - err = -EFAULT; - return err; + err = copy_to_user(arg, data, len); + if (err) + err = -EFAULT; + return err; } EXPORT_SYMBOL(obd_ioctl_popdata); diff --git a/lustre/obdclass/linux/linux-obdo.c b/lustre/obdclass/linux/linux-obdo.c index e5b17bc..b393323 100644 --- a/lustre/obdclass/linux/linux-obdo.c +++ b/lustre/obdclass/linux/linux-obdo.c @@ -172,8 +172,8 @@ void obdo_refresh_inode(struct inode *dst, struct obdo *src, obd_flag valid) if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits)) dst->i_blkbits = ffs(src->o_blksize) - 1; - if (dst->i_blkbits < CFS_PAGE_SHIFT) - dst->i_blkbits = CFS_PAGE_SHIFT; + if (dst->i_blkbits < PAGE_CACHE_SHIFT) + dst->i_blkbits = PAGE_CACHE_SHIFT; /* allocation of space */ if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks) diff --git a/lustre/obdclass/linux/linux-sysctl.c b/lustre/obdclass/linux/linux-sysctl.c index a28f17a..4fa96ca 100644 --- a/lustre/obdclass/linux/linux-sysctl.c +++ b/lustre/obdclass/linux/linux-sysctl.c @@ -134,7 +134,7 @@ int LL_PROC_PROTO(proc_memory_alloc) if (len > *lenp) len = *lenp; buf[len] = '\0'; - if (cfs_copy_to_user(buffer, buf, len)) + if (copy_to_user(buffer, buf, len)) return -EFAULT; *lenp = len; *ppos += *lenp; @@ -158,7 +158,7 @@ int LL_PROC_PROTO(proc_pages_alloc) if (len > *lenp) len = *lenp; buf[len] = '\0'; - if (cfs_copy_to_user(buffer, buf, len)) + if (copy_to_user(buffer, buf, len)) return -EFAULT; *lenp = len; *ppos += *lenp; @@ -182,7 +182,7 @@ int 
LL_PROC_PROTO(proc_mem_max) if (len > *lenp) len = *lenp; buf[len] = '\0'; - if (cfs_copy_to_user(buffer, buf, len)) + if (copy_to_user(buffer, buf, len)) return -EFAULT; *lenp = len; *ppos += *lenp; @@ -206,7 +206,7 @@ int LL_PROC_PROTO(proc_pages_max) if (len > *lenp) len = *lenp; buf[len] = '\0'; - if (cfs_copy_to_user(buffer, buf, len)) + if (copy_to_user(buffer, buf, len)) return -EFAULT; *lenp = len; *ppos += *lenp; @@ -215,44 +215,44 @@ int LL_PROC_PROTO(proc_pages_max) int LL_PROC_PROTO(proc_max_dirty_pages_in_mb) { - int rc = 0; - DECLARE_LL_PROC_PPOS_DECL; - - if (!table->data || !table->maxlen || !*lenp || (*ppos && !write)) { - *lenp = 0; - return 0; - } - if (write) { - rc = lprocfs_write_frac_helper(buffer, *lenp, - (unsigned int*)table->data, - 1 << (20 - CFS_PAGE_SHIFT)); - /* Don't allow them to let dirty pages exceed 90% of system - * memory and set a hard minimum of 4MB. */ - if (obd_max_dirty_pages > ((cfs_num_physpages / 10) * 9)) { - CERROR("Refusing to set max dirty pages to %u, which " - "is more than 90%% of available RAM; setting " - "to %lu\n", obd_max_dirty_pages, - ((cfs_num_physpages / 10) * 9)); - obd_max_dirty_pages = ((cfs_num_physpages / 10) * 9); - } else if (obd_max_dirty_pages < 4 << (20 - CFS_PAGE_SHIFT)) { - obd_max_dirty_pages = 4 << (20 - CFS_PAGE_SHIFT); - } - } else { - char buf[21]; - int len; - - len = lprocfs_read_frac_helper(buf, sizeof(buf), - *(unsigned int*)table->data, - 1 << (20 - CFS_PAGE_SHIFT)); - if (len > *lenp) - len = *lenp; - buf[len] = '\0'; - if (cfs_copy_to_user(buffer, buf, len)) - return -EFAULT; - *lenp = len; - } - *ppos += *lenp; - return rc; + int rc = 0; + DECLARE_LL_PROC_PPOS_DECL; + + if (!table->data || !table->maxlen || !*lenp || (*ppos && !write)) { + *lenp = 0; + return 0; + } + if (write) { + rc = lprocfs_write_frac_helper(buffer, *lenp, + (unsigned int *)table->data, + 1 << (20 - PAGE_CACHE_SHIFT)); + /* Don't allow them to let dirty pages exceed 90% of system + * memory and set a hard minimum of 4MB. 
*/ + if (obd_max_dirty_pages > ((num_physpages / 10) * 9)) { + CERROR("Refusing to set max dirty pages to %u, which " + "is more than 90%% of available RAM; setting " + "to %lu\n", obd_max_dirty_pages, + ((num_physpages / 10) * 9)); + obd_max_dirty_pages = ((num_physpages / 10) * 9); + } else if (obd_max_dirty_pages < 4 << (20 - PAGE_CACHE_SHIFT)) { + obd_max_dirty_pages = 4 << (20 - PAGE_CACHE_SHIFT); + } + } else { + char buf[21]; + int len; + + len = lprocfs_read_frac_helper(buf, sizeof(buf), + *(unsigned int *)table->data, + 1 << (20 - PAGE_CACHE_SHIFT)); + if (len > *lenp) + len = *lenp; + buf[len] = '\0'; + if (copy_to_user(buffer, buf, len)) + return -EFAULT; + *lenp = len; + } + *ppos += *lenp; + return rc; } #ifdef RANDOM_FAIL_ALLOC @@ -279,7 +279,7 @@ int LL_PROC_PROTO(proc_alloc_fail_rate) if (len > *lenp) len = *lenp; buf[len] = '\0'; - if (cfs_copy_to_user(buffer, buf, len)) + if (copy_to_user(buffer, buf, len)) return -EFAULT; *lenp = len; } diff --git a/lustre/obdclass/lprocfs_status.c b/lustre/obdclass/lprocfs_status.c index 97d4107..5cef00b 100644 --- a/lustre/obdclass/lprocfs_status.c +++ b/lustre/obdclass/lprocfs_status.c @@ -198,7 +198,7 @@ static ssize_t lprocfs_fops_read(struct file *f, char __user *buf, char *page, *start = NULL; int rc = 0, eof = 1, count; - if (*ppos >= CFS_PAGE_SIZE) + if (*ppos >= PAGE_CACHE_SIZE) return 0; page = (char *)__get_free_page(GFP_KERNEL); @@ -212,7 +212,7 @@ static ssize_t lprocfs_fops_read(struct file *f, char __user *buf, OBD_FAIL_TIMEOUT(OBD_FAIL_LPROC_REMOVE, 10); if (dp->read_proc) - rc = dp->read_proc(page, &start, *ppos, CFS_PAGE_SIZE, + rc = dp->read_proc(page, &start, *ppos, PAGE_CACHE_SIZE, &eof, dp->data); LPROCFS_EXIT(); if (rc <= 0) @@ -233,7 +233,7 @@ static ssize_t lprocfs_fops_read(struct file *f, char __user *buf, } count = (rc < size) ? rc : size; - if (cfs_copy_to_user(buf, start, count)) { + if (copy_to_user(buf, start, count)) { rc = -EFAULT; goto out; } @@ -519,7 +519,7 @@ int lprocfs_wr_uint(struct file *file, const char *buffer, unsigned long tmp; dummy[MAX_STRING_SIZE] = '\0'; - if (cfs_copy_from_user(dummy, buffer, MAX_STRING_SIZE)) + if (copy_from_user(dummy, buffer, MAX_STRING_SIZE)) return -EFAULT; tmp = simple_strtoul(dummy, &end, 0); @@ -2055,7 +2055,7 @@ int lprocfs_write_frac_helper(const char *buffer, unsigned long count, if (count > (sizeof(kernbuf) - 1)) return -EINVAL; - if (cfs_copy_from_user(kernbuf, buffer, count)) + if (copy_from_user(kernbuf, buffer, count)) return -EFAULT; kernbuf[count] = '\0'; @@ -2161,7 +2161,7 @@ int lprocfs_write_frac_u64_helper(const char *buffer, unsigned long count, if (count > (sizeof(kernbuf) - 1)) return -EINVAL; - if (cfs_copy_from_user(kernbuf, buffer, count)) + if (copy_from_user(kernbuf, buffer, count)) return -EFAULT; kernbuf[count] = '\0'; @@ -2371,11 +2371,13 @@ int lprocfs_obd_rd_recovery_status(char *page, char **start, off_t off, what we need to read */ *start = page + off; - /* We know we are allocated a page here. - Also we know that this function will - not need to write more than a page - so we can truncate at CFS_PAGE_SIZE. */ - size = min(count + (int)off + 1, (int)CFS_PAGE_SIZE); + /* + * We know we are allocated a page here. + * Also we know that this function will + * not need to write more than a page + * so we can truncate at PAGE_CACHE_SIZE. 
+ */ + size = min(count + (int)off + 1, (int)PAGE_CACHE_SIZE); /* Initialize the page */ memset(page, 0, size); diff --git a/lustre/obdclass/lu_object.c b/lustre/obdclass/lu_object.c index f673e9d..e5021fd 100644 --- a/lustre/obdclass/lu_object.c +++ b/lustre/obdclass/lu_object.c @@ -851,12 +851,12 @@ static int lu_htable_order(void) * * Size of lu_object is (arbitrary) taken as 1K (together with inode). */ - cache_size = cfs_num_physpages; + cache_size = num_physpages; #if BITS_PER_LONG == 32 /* limit hashtable size for lowmem systems to low RAM */ - if (cache_size > 1 << (30 - CFS_PAGE_SHIFT)) - cache_size = 1 << (30 - CFS_PAGE_SHIFT) * 3 / 4; + if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT)) + cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4; #endif /* clear off unreasonable cache setting. */ @@ -869,7 +869,7 @@ static int lu_htable_order(void) lu_cache_percent = LU_CACHE_PERCENT_DEFAULT; } cache_size = cache_size / 100 * lu_cache_percent * - (CFS_PAGE_SIZE / 1024); + (PAGE_CACHE_SIZE / 1024); for (bits = 1; (1 << bits) < cache_size; ++bits) { ; @@ -1784,7 +1784,7 @@ int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags, } EXPORT_SYMBOL(lu_env_refill_by_tags); -static struct cfs_shrinker *lu_site_shrinker = NULL; +static struct shrinker *lu_site_shrinker; typedef struct lu_site_stats{ unsigned lss_populated; @@ -1985,7 +1985,7 @@ int lu_global_init(void) * inode, one for ea. Unfortunately setting this high value results in * lu_object/inode cache consuming all the memory. */ - lu_site_shrinker = cfs_set_shrinker(CFS_DEFAULT_SEEKS, lu_cache_shrink); + lu_site_shrinker = set_shrinker(DEFAULT_SEEKS, lu_cache_shrink); if (lu_site_shrinker == NULL) return -ENOMEM; @@ -1998,7 +1998,7 @@ int lu_global_init(void) void lu_global_fini(void) { if (lu_site_shrinker != NULL) { - cfs_remove_shrinker(lu_site_shrinker); + remove_shrinker(lu_site_shrinker); lu_site_shrinker = NULL; } @@ -2062,9 +2062,9 @@ int lu_kmem_init(struct lu_kmem_descr *caches) struct lu_kmem_descr *iter = caches; for (result = 0; iter->ckd_cache != NULL; ++iter) { - *iter->ckd_cache = cfs_mem_cache_create(iter->ckd_name, - iter->ckd_size, - 0, 0); + *iter->ckd_cache = kmem_cache_create(iter->ckd_name, + iter->ckd_size, + 0, 0, NULL); if (*iter->ckd_cache == NULL) { result = -ENOMEM; /* free all previously allocated caches */ @@ -2082,13 +2082,9 @@ EXPORT_SYMBOL(lu_kmem_init); */ void lu_kmem_fini(struct lu_kmem_descr *caches) { - int rc; - for (; caches->ckd_cache != NULL; ++caches) { if (*caches->ckd_cache != NULL) { - rc = cfs_mem_cache_destroy(*caches->ckd_cache); - LASSERTF(rc == 0, "couldn't destroy %s slab\n", - caches->ckd_name); + kmem_cache_destroy(*caches->ckd_cache); *caches->ckd_cache = NULL; } } diff --git a/lustre/obdclass/lu_ref.c b/lustre/obdclass/lu_ref.c index 6deddf7..19a5b35 100644 --- a/lustre/obdclass/lu_ref.c +++ b/lustre/obdclass/lu_ref.c @@ -71,7 +71,7 @@ } \ } while (0) -static cfs_mem_cache_t *lu_ref_link_kmem; +static struct kmem_cache *lu_ref_link_kmem; static struct lu_kmem_descr lu_ref_caches[] = { { @@ -188,7 +188,7 @@ static struct lu_ref_link *lu_ref_add_context(struct lu_ref *ref, void lu_ref_add(struct lu_ref *ref, const char *scope, const void *source) { cfs_might_sleep(); - lu_ref_add_context(ref, CFS_ALLOC_STD, scope, source); + lu_ref_add_context(ref, GFP_IOFS, scope, source); } EXPORT_SYMBOL(lu_ref_add); @@ -211,7 +211,7 @@ EXPORT_SYMBOL(lu_ref_add_at); void lu_ref_add_atomic(struct lu_ref *ref, const char *scope, const void *source) { - lu_ref_add_context(ref, CFS_ALLOC_ATOMIC, 
scope, source); + lu_ref_add_context(ref, GFP_ATOMIC, scope, source); } EXPORT_SYMBOL(lu_ref_add_atomic); diff --git a/lustre/obdecho/echo.c b/lustre/obdecho/echo.c index ba22279..dcddc39 100644 --- a/lustre/obdecho/echo.c +++ b/lustre/obdecho/echo.c @@ -54,8 +54,8 @@ #define ECHO_INIT_OID 0x10000000ULL #define ECHO_HANDLE_MAGIC 0xabcd0123fedc9876ULL -#define ECHO_PERSISTENT_PAGES (ECHO_PERSISTENT_SIZE >> CFS_PAGE_SHIFT) -static cfs_page_t *echo_persistent_pages[ECHO_PERSISTENT_PAGES]; +#define ECHO_PERSISTENT_PAGES (ECHO_PERSISTENT_SIZE >> PAGE_CACHE_SHIFT) +static struct page *echo_persistent_pages[ECHO_PERSISTENT_PAGES]; enum { LPROC_ECHO_READ_BYTES = 1, @@ -229,11 +229,11 @@ static int echo_setattr(const struct lu_env *env, struct obd_export *exp, } static void -echo_page_debug_setup(cfs_page_t *page, int rw, obd_id id, - __u64 offset, int len) +echo_page_debug_setup(struct page *page, int rw, obd_id id, + __u64 offset, int len) { - int page_offset = offset & ~CFS_PAGE_MASK; - char *addr = ((char *)cfs_kmap(page)) + page_offset; + int page_offset = offset & ~CFS_PAGE_MASK; + char *addr = ((char *)kmap(page)) + page_offset; if (len % OBD_ECHO_BLOCK_SIZE != 0) CERROR("Unexpected block size %d\n", len); @@ -252,17 +252,17 @@ echo_page_debug_setup(cfs_page_t *page, int rw, obd_id id, len -= OBD_ECHO_BLOCK_SIZE; } - cfs_kunmap(page); + kunmap(page); } static int -echo_page_debug_check(cfs_page_t *page, obd_id id, - __u64 offset, int len) +echo_page_debug_check(struct page *page, obd_id id, + __u64 offset, int len) { - int page_offset = offset & ~CFS_PAGE_MASK; - char *addr = ((char *)cfs_kmap(page)) + page_offset; - int rc = 0; - int rc2; + int page_offset = offset & ~CFS_PAGE_MASK; + char *addr = ((char *)kmap(page)) + page_offset; + int rc = 0; + int rc2; if (len % OBD_ECHO_BLOCK_SIZE != 0) CERROR("Unexpected block size %d\n", len); @@ -279,9 +279,9 @@ echo_page_debug_check(cfs_page_t *page, obd_id id, len -= OBD_ECHO_BLOCK_SIZE; } - cfs_kunmap(page); + kunmap(page); - return (rc); + return rc; } /* This allows us to verify that desc_private is passed unmolested */ @@ -292,7 +292,7 @@ static int echo_map_nb_to_lb(struct obdo *oa, struct obd_ioobj *obj, struct niobuf_local *lb, int cmd, int *left) { int gfp_mask = (ostid_id(&obj->ioo_oid) & 1) ? 
- CFS_ALLOC_HIGHUSER : CFS_ALLOC_STD; + GFP_HIGHUSER : GFP_IOFS; int ispersistent = ostid_id(&obj->ioo_oid) == ECHO_PERSISTENT_OBJID; int debug_setup = (!ispersistent && (oa->o_valid & OBD_MD_FLFLAGS) != 0 && @@ -301,10 +301,10 @@ static int echo_map_nb_to_lb(struct obdo *oa, struct obd_ioobj *obj, obd_off offset = nb->offset; int len = nb->len; - while (len > 0) { - int plen = CFS_PAGE_SIZE - (offset & (CFS_PAGE_SIZE-1)); - if (len < plen) - plen = len; + while (len > 0) { + int plen = PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1)); + if (len < plen) + plen = len; /* check for local buf overflow */ if (*left == 0) @@ -313,17 +313,17 @@ static int echo_map_nb_to_lb(struct obdo *oa, struct obd_ioobj *obj, res->lnb_file_offset = offset; res->len = plen; LASSERT((res->lnb_file_offset & ~CFS_PAGE_MASK) + res->len <= - CFS_PAGE_SIZE); + PAGE_CACHE_SIZE); if (ispersistent && - ((res->lnb_file_offset >> CFS_PAGE_SHIFT) < + ((res->lnb_file_offset >> PAGE_CACHE_SHIFT) < ECHO_PERSISTENT_PAGES)) { res->page = echo_persistent_pages[res->lnb_file_offset >> - CFS_PAGE_SHIFT]; - /* Take extra ref so __free_pages() can be called OK */ - cfs_get_page (res->page); - } else { + PAGE_CACHE_SHIFT]; + /* Take extra ref so __free_pages() can be called OK */ + get_page (res->page); + } else { OBD_PAGE_ALLOC(res->page, gfp_mask); if (res->page == NULL) { CERROR("can't get page for id " DOSTID"\n", @@ -355,19 +355,20 @@ static int echo_map_nb_to_lb(struct obdo *oa, struct obd_ioobj *obj, } static int echo_finalize_lb(struct obdo *oa, struct obd_ioobj *obj, - struct niobuf_remote *rb, int *pgs, - struct niobuf_local *lb, int verify) + struct niobuf_remote *rb, int *pgs, + struct niobuf_local *lb, int verify) { - struct niobuf_local *res = lb; - obd_off start = rb->offset >> CFS_PAGE_SHIFT; - obd_off end = (rb->offset + rb->len + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT; - int count = (int)(end - start); - int rc = 0; - int i; + struct niobuf_local *res = lb; + obd_off start = rb->offset >> PAGE_CACHE_SHIFT; + obd_off end = (rb->offset + rb->len + PAGE_CACHE_SIZE - 1) >> + PAGE_CACHE_SHIFT; + int count = (int)(end - start); + int rc = 0; + int i; - for (i = 0; i < count; i++, (*pgs) ++, res++) { - cfs_page_t *page = res->page; - void *addr; + for (i = 0; i < count; i++, (*pgs) ++, res++) { + struct page *page = res->page; + void *addr; if (page == NULL) { CERROR("null page objid "LPU64":%p, buf %d/%d\n", @@ -376,9 +377,9 @@ static int echo_finalize_lb(struct obdo *oa, struct obd_ioobj *obj, return -EFAULT; } - addr = cfs_kmap(page); + addr = kmap(page); - CDEBUG(D_PAGE, "$$$$ use page %p, addr %p@"LPU64"\n", + CDEBUG(D_PAGE, "$$$$ use page %p, addr %p@"LPU64"\n", res->page, addr, res->lnb_file_offset); if (verify) { @@ -391,12 +392,12 @@ static int echo_finalize_lb(struct obdo *oa, struct obd_ioobj *obj, rc = vrc; } - cfs_kunmap(page); - /* NB see comment above regarding persistent pages */ - OBD_PAGE_FREE(page); - } + kunmap(page); + /* NB see comment above regarding persistent pages */ + OBD_PAGE_FREE(page); + } - return rc; + return rc; } static int echo_preprw(const struct lu_env *env, int cmd, @@ -466,7 +467,7 @@ preprw_cleanup: */ CERROR("cleaning up %u pages (%d obdos)\n", *pages, objcount); for (i = 0; i < *pages; i++) { - cfs_kunmap(res[i].page); + kunmap(res[i].page); /* NB if this is a persistent page, __free_pages will just * lose the extra ref gained above */ OBD_PAGE_FREE(res[i].page); @@ -546,7 +547,7 @@ commitrw_cleanup: niocount - pgs - 1, objcount); while (pgs < niocount) { - cfs_page_t *page = 
res[pgs++].page; + struct page *page = res[pgs++].page; if (page == NULL) continue; @@ -656,24 +657,24 @@ void echo_persistent_pages_fini(void) int echo_persistent_pages_init(void) { - cfs_page_t *pg; - int i; + struct page *pg; + int i; - for (i = 0; i < ECHO_PERSISTENT_PAGES; i++) { - int gfp_mask = (i < ECHO_PERSISTENT_PAGES/2) ? - CFS_ALLOC_STD : CFS_ALLOC_HIGHUSER; + for (i = 0; i < ECHO_PERSISTENT_PAGES; i++) { + int gfp_mask = (i < ECHO_PERSISTENT_PAGES/2) ? + GFP_IOFS : GFP_HIGHUSER; - OBD_PAGE_ALLOC(pg, gfp_mask); - if (pg == NULL) { - echo_persistent_pages_fini (); - return (-ENOMEM); - } + OBD_PAGE_ALLOC(pg, gfp_mask); + if (pg == NULL) { + echo_persistent_pages_fini(); + return -ENOMEM; + } - memset (cfs_kmap (pg), 0, CFS_PAGE_SIZE); - cfs_kunmap (pg); + memset (kmap (pg), 0, PAGE_CACHE_SIZE); + kunmap (pg); - echo_persistent_pages[i] = pg; - } + echo_persistent_pages[i] = pg; + } - return (0); + return 0; } diff --git a/lustre/obdecho/echo_client.c b/lustre/obdecho/echo_client.c index a956f62..cdfe87b 100644 --- a/lustre/obdecho/echo_client.c +++ b/lustre/obdecho/echo_client.c @@ -90,7 +90,7 @@ struct echo_object_conf { struct echo_page { struct cl_page_slice ep_cl; struct mutex ep_lock; - cfs_page_t *ep_vmpage; + struct page *ep_vmpage; }; struct echo_lock { @@ -184,7 +184,7 @@ static int cl_echo_enqueue (struct echo_object *eco, obd_off start, obd_off end, int mode, __u64 *cookie); static int cl_echo_cancel (struct echo_device *d, __u64 cookie); static int cl_echo_object_brw(struct echo_object *eco, int rw, obd_off offset, - cfs_page_t **pages, int npages, int async); + struct page **pages, int npages, int async); static struct echo_thread_info *echo_env_info(const struct lu_env *env); @@ -215,11 +215,11 @@ struct echo_session_info { unsigned long dummy; }; -static cfs_mem_cache_t *echo_lock_kmem; -static cfs_mem_cache_t *echo_object_kmem; -static cfs_mem_cache_t *echo_thread_kmem; -static cfs_mem_cache_t *echo_session_kmem; -//static cfs_mem_cache_t *echo_req_kmem; +static struct kmem_cache *echo_lock_kmem; +static struct kmem_cache *echo_object_kmem; +static struct kmem_cache *echo_thread_kmem; +static struct kmem_cache *echo_session_kmem; +/* static struct kmem_cache *echo_req_kmem; */ static struct lu_kmem_descr echo_caches[] = { { @@ -260,7 +260,7 @@ static struct lu_kmem_descr echo_caches[] = { * * @{ */ -static cfs_page_t *echo_page_vmpage(const struct lu_env *env, +static struct page *echo_page_vmpage(const struct lu_env *env, const struct cl_page_slice *slice) { return cl2echo_page(slice)->ep_vmpage; @@ -312,16 +312,14 @@ static void echo_page_completion(const struct lu_env *env, } static void echo_page_fini(const struct lu_env *env, - struct cl_page_slice *slice) + struct cl_page_slice *slice) { - struct echo_page *ep = cl2echo_page(slice); - struct echo_object *eco = cl2echo_obj(slice->cpl_obj); - cfs_page_t *vmpage = ep->ep_vmpage; - ENTRY; + struct echo_object *eco = cl2echo_obj(slice->cpl_obj); + ENTRY; - cfs_atomic_dec(&eco->eo_npages); - page_cache_release(vmpage); - EXIT; + cfs_atomic_dec(&eco->eo_npages); + page_cache_release(cl2echo_page(slice)->ep_vmpage); + EXIT; } static int echo_page_prep(const struct lu_env *env, @@ -409,7 +407,7 @@ static struct cl_lock_operations echo_lock_ops = { * @{ */ static int echo_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, cfs_page_t *vmpage) + struct cl_page *page, struct page *vmpage) { struct echo_page *ep = cl_object_page_slice(obj, page); struct echo_object *eco = cl2echo_obj(obj); 
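
The static cache declarations above (echo_lock_kmem and friends) show the conversion pattern this series applies everywhere: cfs_mem_cache_t becomes struct kmem_cache, cfs_mem_cache_create() becomes kmem_cache_create() with the kernel's extra constructor argument, and the void-returning kmem_cache_destroy() replaces cfs_mem_cache_destroy(), which is why the old rc/LASSERTF checks around destruction drop out (compare lu_kmem_fini() and ofd_fmd_exit() in this patch). A minimal sketch of the resulting kernel-API usage; the example_* names are hypothetical and not part of this patch:

	#include <linux/slab.h>
	#include <linux/errno.h>

	struct example_obj {
		int eo_field;
	};

	static struct kmem_cache *example_kmem;	/* was: cfs_mem_cache_t * */

	static int example_cache_init(void)
	{
		/* kernel prototype adds a ctor callback; callers converted by
		 * this patch pass NULL, matching the old wrapper's behaviour */
		example_kmem = kmem_cache_create("example_cache",
						 sizeof(struct example_obj),
						 0, 0, NULL);
		return example_kmem == NULL ? -ENOMEM : 0;
	}

	static void example_cache_fini(void)
	{
		/* kmem_cache_destroy() returns void: nothing to LASSERTF on */
		kmem_cache_destroy(example_kmem);
		example_kmem = NULL;
	}
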
@@ -436,7 +434,7 @@ static int echo_lock_init(const struct lu_env *env, struct echo_lock *el; ENTRY; - OBD_SLAB_ALLOC_PTR_GFP(el, echo_lock_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(el, echo_lock_kmem, __GFP_IO); if (el != NULL) { cl_lock_slice_add(lock, &el->el_cl, obj, &echo_lock_ops); el->el_object = cl2echo_obj(obj); @@ -615,7 +613,7 @@ static struct lu_object *echo_object_alloc(const struct lu_env *env, /* we're the top dev. */ LASSERT(hdr == NULL); - OBD_SLAB_ALLOC_PTR_GFP(eco, echo_object_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(eco, echo_object_kmem, __GFP_IO); if (eco != NULL) { struct cl_object_header *hdr = &eco->eo_hdr; @@ -679,7 +677,7 @@ static void *echo_thread_key_init(const struct lu_context *ctx, { struct echo_thread_info *info; - OBD_SLAB_ALLOC_PTR_GFP(info, echo_thread_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(info, echo_thread_kmem, __GFP_IO); if (info == NULL) info = ERR_PTR(-ENOMEM); return info; @@ -709,7 +707,7 @@ static void *echo_session_key_init(const struct lu_context *ctx, { struct echo_session_info *session; - OBD_SLAB_ALLOC_PTR_GFP(session, echo_session_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(session, echo_session_kmem, __GFP_IO); if (session == NULL) session = ERR_PTR(-ENOMEM); return session; @@ -1309,7 +1307,7 @@ static int cl_echo_async_brw(const struct lu_env *env, struct cl_io *io, } static int cl_echo_object_brw(struct echo_object *eco, int rw, obd_off offset, - cfs_page_t **pages, int npages, int async) + struct page **pages, int npages, int async) { struct lu_env *env; struct echo_thread_info *info; @@ -1345,7 +1343,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, obd_off offset, rc = cl_echo_enqueue0(env, eco, offset, - offset + npages * CFS_PAGE_SIZE - 1, + offset + npages * PAGE_CACHE_SIZE - 1, rw == READ ? 
LCK_PR : LCK_PW, &lh.cookie, CEF_NEVER); if (rc < 0) @@ -1416,11 +1414,11 @@ echo_copyout_lsm (struct lov_stripe_md *lsm, void *_ulsm, int ulsm_nob) if (nob > ulsm_nob) return (-EINVAL); - if (cfs_copy_to_user (ulsm, lsm, sizeof(ulsm))) + if (copy_to_user (ulsm, lsm, sizeof(ulsm))) return (-EFAULT); for (i = 0; i < lsm->lsm_stripe_count; i++) { - if (cfs_copy_to_user (ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i], + if (copy_to_user (ulsm->lsm_oinfo[i], lsm->lsm_oinfo[i], sizeof(lsm->lsm_oinfo[0]))) return (-EFAULT); } @@ -1437,7 +1435,7 @@ echo_copyin_lsm (struct echo_device *ed, struct lov_stripe_md *lsm, if (ulsm_nob < sizeof (*lsm)) return (-EINVAL); - if (cfs_copy_from_user (lsm, ulsm, sizeof (*lsm))) + if (copy_from_user (lsm, ulsm, sizeof (*lsm))) return (-EFAULT); if (lsm->lsm_stripe_count > ec->ec_nstripes || @@ -1448,7 +1446,7 @@ echo_copyin_lsm (struct echo_device *ed, struct lov_stripe_md *lsm, for (i = 0; i < lsm->lsm_stripe_count; i++) { - if (cfs_copy_from_user(lsm->lsm_oinfo[i], + if (copy_from_user(lsm->lsm_oinfo[i], ((struct lov_stripe_md *)ulsm)-> \ lsm_oinfo[i], sizeof(lsm->lsm_oinfo[0]))) @@ -2173,7 +2171,7 @@ static int echo_md_handler(struct echo_device *ed, int command, OBD_ALLOC(name, namelen + 1); if (name == NULL) GOTO(out_put, rc = -ENOMEM); - if (cfs_copy_from_user(name, data->ioc_pbuf2, namelen)) + if (copy_from_user(name, data->ioc_pbuf2, namelen)) GOTO(out_name, rc = -EFAULT); } @@ -2274,7 +2272,7 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed, lsm->lsm_stripe_count = ec->ec_nstripes; if (lsm->lsm_stripe_size == 0) - lsm->lsm_stripe_size = CFS_PAGE_SIZE; + lsm->lsm_stripe_size = PAGE_CACHE_SIZE; idx = cfs_rand(); @@ -2402,7 +2400,7 @@ echo_get_stripe_off_id (struct lov_stripe_md *lsm, obd_off *offp, obd_id *idp) static void echo_client_page_debug_setup(struct lov_stripe_md *lsm, - cfs_page_t *page, int rw, obd_id id, + struct page *page, int rw, obd_id id, obd_off offset, obd_off count) { char *addr; @@ -2411,11 +2409,11 @@ echo_client_page_debug_setup(struct lov_stripe_md *lsm, int delta; /* no partial pages on the client */ - LASSERT(count == CFS_PAGE_SIZE); + LASSERT(count == PAGE_CACHE_SIZE); - addr = cfs_kmap(page); + addr = kmap(page); - for (delta = 0; delta < CFS_PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { + for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { if (rw == OBD_BRW_WRITE) { stripe_off = offset + delta; stripe_id = id; @@ -2428,11 +2426,11 @@ echo_client_page_debug_setup(struct lov_stripe_md *lsm, stripe_off, stripe_id); } - cfs_kunmap(page); + kunmap(page); } static int echo_client_page_debug_check(struct lov_stripe_md *lsm, - cfs_page_t *page, obd_id id, + struct page *page, obd_id id, obd_off offset, obd_off count) { obd_off stripe_off; @@ -2443,11 +2441,11 @@ static int echo_client_page_debug_check(struct lov_stripe_md *lsm, int rc2; /* no partial pages on the client */ - LASSERT(count == CFS_PAGE_SIZE); + LASSERT(count == PAGE_CACHE_SIZE); - addr = cfs_kmap(page); + addr = kmap(page); - for (rc = delta = 0; delta < CFS_PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { + for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { stripe_off = offset + delta; stripe_id = id; echo_get_stripe_off_id (lsm, &stripe_off, &stripe_id); @@ -2461,7 +2459,7 @@ static int echo_client_page_debug_check(struct lov_stripe_md *lsm, } } - cfs_kunmap(page); + kunmap(page); return rc; } @@ -2474,7 +2472,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, obd_count 
npages; struct brw_page *pga; struct brw_page *pgp; - cfs_page_t **pages; + struct page **pages; obd_off off; int i; int rc; @@ -2487,7 +2485,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, (oa->o_valid & OBD_MD_FLFLAGS) != 0 && (oa->o_flags & OBD_FL_DEBUG_CHECK) != 0); - gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? CFS_ALLOC_STD : CFS_ALLOC_HIGHUSER; + gfp_mask = ((ostid_id(&oa->o_oi) & 2) == 0) ? GFP_IOFS : GFP_HIGHUSER; LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ); LASSERT(lsm != NULL); @@ -2498,7 +2496,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, RETURN(-EINVAL); /* XXX think again with misaligned I/O */ - npages = count >> CFS_PAGE_SHIFT; + npages = count >> PAGE_CACHE_SHIFT; if (rw == OBD_BRW_WRITE) brw_flags = OBD_BRW_ASYNC; @@ -2515,7 +2513,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, for (i = 0, pgp = pga, off = offset; i < npages; - i++, pgp++, off += CFS_PAGE_SIZE) { + i++, pgp++, off += PAGE_CACHE_SIZE) { LASSERT (pgp->pg == NULL); /* for cleanup */ @@ -2525,7 +2523,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, goto out; pages[i] = pgp->pg; - pgp->count = CFS_PAGE_SIZE; + pgp->count = PAGE_CACHE_SIZE; pgp->off = off; pgp->flag = brw_flags; @@ -2583,8 +2581,8 @@ static int echo_client_prep_commit(const struct lu_env *env, (lsm != NULL && ostid_id(&lsm->lsm_oi) != ostid_id(&oa->o_oi))) RETURN(-EINVAL); - npages = batch >> CFS_PAGE_SHIFT; - tot_pages = count >> CFS_PAGE_SHIFT; + npages = batch >> PAGE_CACHE_SHIFT; + tot_pages = count >> PAGE_CACHE_SHIFT; OBD_ALLOC(lnb, npages * sizeof(struct niobuf_local)); OBD_ALLOC(rnb, npages * sizeof(struct niobuf_remote)); @@ -2605,9 +2603,9 @@ static int echo_client_prep_commit(const struct lu_env *env, if (tot_pages < npages) npages = tot_pages; - for (i = 0; i < npages; i++, off += CFS_PAGE_SIZE) { + for (i = 0; i < npages; i++, off += PAGE_CACHE_SIZE) { rnb[i].offset = off; - rnb[i].len = CFS_PAGE_SIZE; + rnb[i].len = PAGE_CACHE_SIZE; rnb[i].flags = brw_flags; } @@ -2622,7 +2620,7 @@ static int echo_client_prep_commit(const struct lu_env *env, LASSERT(lpages == npages); for (i = 0; i < lpages; i++) { - cfs_page_t *page = lnb[i].page; + struct page *page = lnb[i].page; /* read past eof? 
*/ if (page == NULL && lnb[i].rc == 0) @@ -2844,7 +2842,7 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, if (dir == NULL) GOTO(out, rc = -ENOMEM); - if (cfs_copy_from_user(dir, data->ioc_pbuf1, dirlen)) { + if (copy_from_user(dir, data->ioc_pbuf1, dirlen)) { OBD_FREE(dir, data->ioc_plen1 + 1); GOTO(out, rc = -EFAULT); } @@ -2881,11 +2879,11 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len, GOTO(out, rc); } - if (cfs_copy_to_user(data->ioc_pbuf1, &seq, data->ioc_plen1)) + if (copy_to_user(data->ioc_pbuf1, &seq, data->ioc_plen1)) return -EFAULT; max_count = LUSTRE_METADATA_SEQ_MAX_WIDTH; - if (cfs_copy_to_user(data->ioc_pbuf2, &max_count, + if (copy_to_user(data->ioc_pbuf2, &max_count, data->ioc_plen2)) return -EFAULT; GOTO(out, rc); @@ -3204,7 +3202,7 @@ static int __init obdecho_init(void) ENTRY; LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n"); - LASSERT(CFS_PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0); + LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0); lprocfs_echo_init_vars(&lvars); diff --git a/lustre/obdecho/echo_internal.h b/lustre/obdecho/echo_internal.h index 62a7a59..fdaa43c 100644 --- a/lustre/obdecho/echo_internal.h +++ b/lustre/obdecho/echo_internal.h @@ -51,13 +51,6 @@ void echo_persistent_pages_fini(void); # endif #else /* ! __KERNEL__ */ /* Kludge here, define some functions and macros needed by liblustre -jay */ -static inline void page_cache_get(struct page *page) -{ -} - -static inline void page_cache_release(struct page *page) -{ -} #define READ 0 #define WRITE 1 diff --git a/lustre/ofd/ofd_dev.c b/lustre/ofd/ofd_dev.c index 0719d7d..20e3c59 100644 --- a/lustre/ofd/ofd_dev.c +++ b/lustre/ofd/ofd_dev.c @@ -49,7 +49,7 @@ #include "ofd_internal.h" /* Slab for OFD object allocation */ -static cfs_mem_cache_t *ofd_object_kmem; +static struct kmem_cache *ofd_object_kmem; static struct lu_kmem_descr ofd_caches[] = { { @@ -313,7 +313,7 @@ static struct lu_object *ofd_object_alloc(const struct lu_env *env, ENTRY; - OBD_SLAB_ALLOC_PTR_GFP(of, ofd_object_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(of, ofd_object_kmem, __GFP_IO); if (of != NULL) { struct lu_object *o; struct lu_object_header *h; diff --git a/lustre/ofd/ofd_fmd.c b/lustre/ofd/ofd_fmd.c index ab2a5a8..cee17d2 100644 --- a/lustre/ofd/ofd_fmd.c +++ b/lustre/ofd/ofd_fmd.c @@ -40,7 +40,7 @@ #include "ofd_internal.h" -static cfs_mem_cache_t *ll_fmd_cachep; +static struct kmem_cache *ll_fmd_cachep; /* drop fmd reference, free it if last ref. 
must be called with fed_lock held.*/ static inline void ofd_fmd_put_nolock(struct obd_export *exp, @@ -227,9 +227,9 @@ void ofd_fmd_cleanup(struct obd_export *exp) int ofd_fmd_init(void) { - ll_fmd_cachep = cfs_mem_cache_create("ll_fmd_cache", - sizeof(struct ofd_mod_data), - 0, 0); + ll_fmd_cachep = kmem_cache_create("ll_fmd_cache", + sizeof(struct ofd_mod_data), + 0, 0, NULL); if (!ll_fmd_cachep) return -ENOMEM; else @@ -239,9 +239,7 @@ int ofd_fmd_init(void) void ofd_fmd_exit(void) { if (ll_fmd_cachep) { - int rc = cfs_mem_cache_destroy(ll_fmd_cachep); - - LASSERTF(rc == 0, "Cannot destroy ll_fmd_cachep: rc %d\n", rc); + kmem_cache_destroy(ll_fmd_cachep); ll_fmd_cachep = NULL; } } diff --git a/lustre/ofd/ofd_internal.h b/lustre/ofd/ofd_internal.h index f653b49..5dac3ab 100644 --- a/lustre/ofd/ofd_internal.h +++ b/lustre/ofd/ofd_internal.h @@ -440,7 +440,7 @@ static inline int ofd_grant_compat(struct obd_export *exp, struct ofd_device *ofd) { /* Clients which don't support OBD_CONNECT_GRANT_PARAM cannot handle - * a block size > page size and consume CFS_PAGE_SIZE of grant when + * a block size > page size and consume PAGE_CACHE_SIZE of grant when * dirtying a page regardless of the block size */ return !!(ofd_obd(ofd)->obd_self_export != exp && ofd->ofd_blockbits > COMPAT_BSIZE_SHIFT && @@ -453,7 +453,7 @@ static inline int ofd_grant_prohibit(struct obd_export *exp, /* When ofd_grant_compat_disable is set, we don't grant any space to * clients not supporting OBD_CONNECT_GRANT_PARAM. * Otherwise, space granted to such a client is inflated since it - * consumes CFS_PAGE_SIZE of grant space per block */ + * consumes PAGE_CACHE_SIZE of grant space per block */ return !!(ofd_grant_compat(exp, ofd) && ofd->ofd_grant_compat_disable); } diff --git a/lustre/ofd/ofd_obd.c b/lustre/ofd/ofd_obd.c index 6319e3d..6ff96ec 100644 --- a/lustre/ofd/ofd_obd.c +++ b/lustre/ofd/ofd_obd.c @@ -1603,11 +1603,11 @@ static int ofd_health_check(const struct lu_env *nul, struct obd_device *obd) GOTO(out, rc = -EROFS); #ifdef USE_HEALTH_CHECK_WRITE - OBD_ALLOC(info->fti_buf.lb_buf, CFS_PAGE_SIZE); + OBD_ALLOC(info->fti_buf.lb_buf, PAGE_CACHE_SIZE); if (info->fti_buf.lb_buf == NULL) GOTO(out, rc = -ENOMEM); - info->fti_buf.lb_len = CFS_PAGE_SIZE; + info->fti_buf.lb_len = PAGE_CACHE_SIZE; info->fti_off = 0; th = dt_trans_create(&env, ofd->ofd_osd); @@ -1626,7 +1626,7 @@ static int ofd_health_check(const struct lu_env *nul, struct obd_device *obd) } dt_trans_stop(&env, ofd->ofd_osd, th); - OBD_FREE(info->fti_buf.lb_buf, CFS_PAGE_SIZE); + OBD_FREE(info->fti_buf.lb_buf, PAGE_CACHE_SIZE); CDEBUG(D_INFO, "write 1 page synchronously for checking io rc %d\n",rc); #endif diff --git a/lustre/osc/lproc_osc.c b/lustre/osc/lproc_osc.c index 1ee6d60..ba23711 100644 --- a/lustre/osc/lproc_osc.c +++ b/lustre/osc/lproc_osc.c @@ -134,28 +134,28 @@ static int osc_rd_max_dirty_mb(char *page, char **start, off_t off, int count, } static int osc_wr_max_dirty_mb(struct file *file, const char *buffer, - unsigned long count, void *data) + unsigned long count, void *data) { - struct obd_device *dev = data; - struct client_obd *cli = &dev->u.cli; - int pages_number, mult, rc; + struct obd_device *dev = data; + struct client_obd *cli = &dev->u.cli; + int pages_number, mult, rc; - mult = 1 << (20 - CFS_PAGE_SHIFT); - rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult); - if (rc) - return rc; + mult = 1 << (20 - PAGE_CACHE_SHIFT); + rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult); + if (rc) + return rc; - if 
(pages_number <= 0 || - pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - CFS_PAGE_SHIFT) || - pages_number > cfs_num_physpages / 4) /* 1/4 of RAM */ - return -ERANGE; + if (pages_number <= 0 || + pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) || + pages_number > num_physpages / 4) /* 1/4 of RAM */ + return -ERANGE; - client_obd_list_lock(&cli->cl_loi_list_lock); - cli->cl_dirty_max = (obd_count)(pages_number << CFS_PAGE_SHIFT); - osc_wake_cache_waiters(cli); - client_obd_list_unlock(&cli->cl_loi_list_lock); + client_obd_list_lock(&cli->cl_loi_list_lock); + cli->cl_dirty_max = (obd_count)(pages_number << PAGE_CACHE_SHIFT); + osc_wake_cache_waiters(cli); + client_obd_list_unlock(&cli->cl_loi_list_lock); - return count; + return count; } static int osc_rd_cached_mb(char *page, char **start, off_t off, int count, @@ -163,7 +163,7 @@ static int osc_rd_cached_mb(char *page, char **start, off_t off, int count, { struct obd_device *dev = data; struct client_obd *cli = &dev->u.cli; - int shift = 20 - CFS_PAGE_SHIFT; + int shift = 20 - PAGE_CACHE_SHIFT; int rc; rc = snprintf(page, count, @@ -184,7 +184,7 @@ static int osc_wr_cached_mb(struct file *file, const char *buffer, struct client_obd *cli = &dev->u.cli; int pages_number, mult, rc; - mult = 1 << (20 - CFS_PAGE_SHIFT); + mult = 1 << (20 - PAGE_CACHE_SHIFT); buffer = lprocfs_find_named_value(buffer, "used_mb:", &count); rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult); if (rc) @@ -371,7 +371,7 @@ static int osc_wd_checksum_type(struct file *file, const char *buffer, if (count > sizeof(kernbuf) - 1) return -EINVAL; - if (cfs_copy_from_user(kernbuf, buffer, count)) + if (copy_from_user(kernbuf, buffer, count)) return -EFAULT; if (count > 0 && kernbuf[count - 1] == '\n') kernbuf[count - 1] = '\0'; @@ -477,14 +477,14 @@ static int lprocfs_osc_wr_max_pages_per_rpc(struct file *file, /* if the max_pages is specified in bytes, convert to pages */ if (val >= ONE_MB_BRW_SIZE) - val >>= CFS_PAGE_SHIFT; + val >>= PAGE_CACHE_SHIFT; LPROCFS_CLIMP_CHECK(dev); - chunk_mask = ~((1 << (cli->cl_chunkbits - CFS_PAGE_SHIFT)) - 1); + chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_CACHE_SHIFT)) - 1); /* max_pages_per_rpc must be chunk aligned */ val = (val + ~chunk_mask) & chunk_mask; - if (val == 0 || val > ocd->ocd_brw_size >> CFS_PAGE_SHIFT) { + if (val == 0 || val > ocd->ocd_brw_size >> PAGE_CACHE_SHIFT) { LPROCFS_CLIMP_EXIT(dev); return -ERANGE; } diff --git a/lustre/osc/osc_cache.c b/lustre/osc/osc_cache.c index e43511a..8536171 100644 --- a/lustre/osc/osc_cache.c +++ b/lustre/osc/osc_cache.c @@ -309,7 +309,7 @@ static struct osc_extent *osc_extent_alloc(struct osc_object *obj) { struct osc_extent *ext; - OBD_SLAB_ALLOC_PTR_GFP(ext, osc_extent_kmem, CFS_ALLOC_STD); + OBD_SLAB_ALLOC_PTR_GFP(ext, osc_extent_kmem, GFP_IOFS); if (ext == NULL) return NULL; @@ -505,7 +505,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur, return -ERANGE; LASSERT(cur->oe_osclock == victim->oe_osclock); - ppc_bits = osc_cli(obj)->cl_chunkbits - CFS_PAGE_SHIFT; + ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT; chunk_start = cur->oe_start >> ppc_bits; chunk_end = cur->oe_end >> ppc_bits; if (chunk_start != (victim->oe_end >> ppc_bits) + 1 && @@ -612,8 +612,8 @@ struct osc_extent *osc_extent_find(const struct lu_env *env, LASSERT(lock != NULL); LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE); - LASSERT(cli->cl_chunkbits >= CFS_PAGE_SHIFT); - ppc_bits = cli->cl_chunkbits - CFS_PAGE_SHIFT; + LASSERT(cli->cl_chunkbits >= 
PAGE_CACHE_SHIFT); + ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT; chunk_mask = ~((1 << ppc_bits) - 1); chunksize = 1 << cli->cl_chunkbits; chunk = index >> ppc_bits; @@ -827,8 +827,8 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext, if (!sent) { lost_grant = ext->oe_grants; - } else if (blocksize < CFS_PAGE_SIZE && - last_count != CFS_PAGE_SIZE) { + } else if (blocksize < PAGE_CACHE_SIZE && + last_count != PAGE_CACHE_SIZE) { /* For short writes we shouldn't count parts of pages that * span a whole chunk on the OST side, or our accounting goes * wrong. Should match the code in filter_grant_check. */ @@ -838,7 +838,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext, if (end) count += blocksize - end; - lost_grant = CFS_PAGE_SIZE - count; + lost_grant = PAGE_CACHE_SIZE - count; } if (ext->oe_grants > 0) osc_free_grant(cli, nr_pages, lost_grant); @@ -920,7 +920,8 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, struct osc_async_page *oap; struct osc_async_page *tmp; int pages_in_chunk = 0; - int ppc_bits = cli->cl_chunkbits - CFS_PAGE_SHIFT; + int ppc_bits = cli->cl_chunkbits - + PAGE_CACHE_SHIFT; __u64 trunc_chunk = trunc_index >> ppc_bits; int grants = 0; int nr_pages = 0; @@ -1077,7 +1078,7 @@ static int osc_extent_make_ready(const struct lu_env *env, if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) { last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE); LASSERT(last->oap_count > 0); - LASSERT(last->oap_page_off + last->oap_count <= CFS_PAGE_SIZE); + LASSERT(last->oap_page_off + last->oap_count <= PAGE_CACHE_SIZE); last->oap_async_flags |= ASYNC_COUNT_STABLE; } @@ -1085,7 +1086,7 @@ static int osc_extent_make_ready(const struct lu_env *env, * because it's known they are not the last page */ cfs_list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) { - oap->oap_count = CFS_PAGE_SIZE - oap->oap_page_off; + oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off; oap->oap_async_flags |= ASYNC_COUNT_STABLE; } } @@ -1109,7 +1110,7 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants) struct osc_object *obj = ext->oe_obj; struct client_obd *cli = osc_cli(obj); struct osc_extent *next; - int ppc_bits = cli->cl_chunkbits - CFS_PAGE_SHIFT; + int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT; pgoff_t chunk = index >> ppc_bits; pgoff_t end_chunk; pgoff_t end_index; @@ -1241,9 +1242,9 @@ static int osc_refresh_count(const struct lu_env *env, return 0; else if (cl_offset(obj, page->cp_index + 1) > kms) /* catch sub-page write at end of file */ - return kms % CFS_PAGE_SIZE; + return kms % PAGE_CACHE_SIZE; else - return CFS_PAGE_SIZE; + return PAGE_CACHE_SIZE; } static int osc_completion(const struct lu_env *env, struct osc_async_page *oap, @@ -1326,10 +1327,10 @@ static void osc_consume_write_grant(struct client_obd *cli, LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock); LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT)); cfs_atomic_inc(&obd_dirty_pages); - cli->cl_dirty += CFS_PAGE_SIZE; + cli->cl_dirty += PAGE_CACHE_SIZE; pga->flag |= OBD_BRW_FROM_GRANT; CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n", - CFS_PAGE_SIZE, pga, pga->pg); + PAGE_CACHE_SIZE, pga, pga->pg); osc_update_next_shrink(cli); } @@ -1348,11 +1349,11 @@ static void osc_release_write_grant(struct client_obd *cli, pga->flag &= ~OBD_BRW_FROM_GRANT; cfs_atomic_dec(&obd_dirty_pages); - cli->cl_dirty -= CFS_PAGE_SIZE; + cli->cl_dirty -= PAGE_CACHE_SIZE; if 
(pga->flag & OBD_BRW_NOCACHE) { pga->flag &= ~OBD_BRW_NOCACHE; cfs_atomic_dec(&obd_dirty_transit_pages); - cli->cl_dirty_transit -= CFS_PAGE_SIZE; + cli->cl_dirty_transit -= PAGE_CACHE_SIZE; } EXIT; } @@ -1408,7 +1409,7 @@ void osc_unreserve_grant(struct client_obd *cli, * used, we should return these grants to OST. There're two cases where grants * can be lost: * 1. truncate; - * 2. blocksize at OST is less than CFS_PAGE_SIZE and a partial page was + * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was * written. In this case OST may use less chunks to serve this partial * write. OSTs don't actually know the page size on the client side. so * clients have to calculate lost grant by the blocksize on the OST. @@ -1421,7 +1422,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages, client_obd_list_lock(&cli->cl_loi_list_lock); cfs_atomic_sub(nr_pages, &obd_dirty_pages); - cli->cl_dirty -= nr_pages << CFS_PAGE_SHIFT; + cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT; cli->cl_lost_grant += lost_grant; if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) { /* borrow some grant from truncate to avoid the case that @@ -1463,11 +1464,11 @@ static int osc_enter_cache_try(struct client_obd *cli, if (rc < 0) return 0; - if (cli->cl_dirty + CFS_PAGE_SIZE <= cli->cl_dirty_max && + if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max && cfs_atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) { osc_consume_write_grant(cli, &oap->oap_brw_page); if (transient) { - cli->cl_dirty_transit += CFS_PAGE_SIZE; + cli->cl_dirty_transit += PAGE_CACHE_SIZE; cfs_atomic_inc(&obd_dirty_transit_pages); oap->oap_brw_flags |= OBD_BRW_NOCACHE; } @@ -1512,7 +1513,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli, /* force the caller to try sync io. 
this can jump the list * of queued writes and create a discontiguous rpc stream */ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) || - cli->cl_dirty_max < CFS_PAGE_SIZE || + cli->cl_dirty_max < PAGE_CACHE_SIZE || cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) GOTO(out, rc = -EDQUOT); @@ -1577,7 +1578,7 @@ void osc_wake_cache_waiters(struct client_obd *cli) ocw->ocw_rc = -EDQUOT; /* we can't dirty more */ - if ((cli->cl_dirty + CFS_PAGE_SIZE > cli->cl_dirty_max) || + if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) || (cfs_atomic_read(&obd_dirty_pages) + 1 > obd_max_dirty_pages)) { CDEBUG(D_CACHE, "no dirty room: dirty: %ld " @@ -2162,7 +2163,7 @@ void osc_io_unplug(const struct lu_env *env, struct client_obd *cli, } int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops, - cfs_page_t *page, loff_t offset) + struct page *page, loff_t offset) { struct obd_export *exp = osc_export(osc); struct osc_async_page *oap = &ops->ops_oap; @@ -2443,7 +2444,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io, oap->oap_async_flags |= ASYNC_READY|ASYNC_URGENT; spin_unlock(&oap->oap_lock); - if (cfs_memory_pressure_get()) + if (memory_pressure_get()) ext->oe_memalloc = 1; ext->oe_urgent = 1; diff --git a/lustre/osc/osc_cl_internal.h b/lustre/osc/osc_cl_internal.h index d53c29f..33b06e7 100644 --- a/lustre/osc/osc_cl_internal.h +++ b/lustre/osc/osc_cl_internal.h @@ -403,12 +403,12 @@ struct osc_page { struct cl_lock *ops_lock; }; -extern cfs_mem_cache_t *osc_lock_kmem; -extern cfs_mem_cache_t *osc_object_kmem; -extern cfs_mem_cache_t *osc_thread_kmem; -extern cfs_mem_cache_t *osc_session_kmem; -extern cfs_mem_cache_t *osc_req_kmem; -extern cfs_mem_cache_t *osc_extent_kmem; +extern struct kmem_cache *osc_lock_kmem; +extern struct kmem_cache *osc_object_kmem; +extern struct kmem_cache *osc_thread_kmem; +extern struct kmem_cache *osc_session_kmem; +extern struct kmem_cache *osc_req_kmem; +extern struct kmem_cache *osc_extent_kmem; extern struct lu_device_type osc_device_type; extern struct lu_context_key osc_key; @@ -427,7 +427,7 @@ struct lu_object *osc_object_alloc(const struct lu_env *env, const struct lu_object_header *hdr, struct lu_device *dev); int osc_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, cfs_page_t *vmpage); + struct cl_page *page, struct page *vmpage); void osc_index2policy (ldlm_policy_data_t *policy, const struct cl_object *obj, pgoff_t start, pgoff_t end); @@ -440,7 +440,7 @@ int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops); int osc_set_async_flags(struct osc_object *obj, struct osc_page *opg, obd_flag async_flags); int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops, - cfs_page_t *page, loff_t offset); + struct page *page, loff_t offset); int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, struct osc_page *ops); int osc_teardown_async_page(const struct lu_env *env, struct osc_object *obj, diff --git a/lustre/osc/osc_dev.c b/lustre/osc/osc_dev.c index b5bacb4..44549b4 100644 --- a/lustre/osc/osc_dev.c +++ b/lustre/osc/osc_dev.c @@ -49,13 +49,13 @@ * @{ */ -cfs_mem_cache_t *osc_lock_kmem; -cfs_mem_cache_t *osc_object_kmem; -cfs_mem_cache_t *osc_thread_kmem; -cfs_mem_cache_t *osc_session_kmem; -cfs_mem_cache_t *osc_req_kmem; -cfs_mem_cache_t *osc_extent_kmem; -cfs_mem_cache_t *osc_quota_kmem; +struct kmem_cache *osc_lock_kmem; +struct kmem_cache *osc_object_kmem; +struct kmem_cache *osc_thread_kmem; +struct kmem_cache *osc_session_kmem; +struct 
kmem_cache *osc_req_kmem; +struct kmem_cache *osc_extent_kmem; +struct kmem_cache *osc_quota_kmem; struct lu_kmem_descr osc_caches[] = { { @@ -118,14 +118,14 @@ static struct lu_device *osc2lu_dev(struct osc_device *osc) */ static void *osc_key_init(const struct lu_context *ctx, - struct lu_context_key *key) + struct lu_context_key *key) { - struct osc_thread_info *info; + struct osc_thread_info *info; - OBD_SLAB_ALLOC_PTR_GFP(info, osc_thread_kmem, CFS_ALLOC_IO); - if (info == NULL) - info = ERR_PTR(-ENOMEM); - return info; + OBD_SLAB_ALLOC_PTR_GFP(info, osc_thread_kmem, __GFP_IO); + if (info == NULL) + info = ERR_PTR(-ENOMEM); + return info; } static void osc_key_fini(const struct lu_context *ctx, @@ -142,14 +142,14 @@ struct lu_context_key osc_key = { }; static void *osc_session_init(const struct lu_context *ctx, - struct lu_context_key *key) + struct lu_context_key *key) { - struct osc_session *info; + struct osc_session *info; - OBD_SLAB_ALLOC_PTR_GFP(info, osc_session_kmem, CFS_ALLOC_IO); - if (info == NULL) - info = ERR_PTR(-ENOMEM); - return info; + OBD_SLAB_ALLOC_PTR_GFP(info, osc_session_kmem, __GFP_IO); + if (info == NULL) + info = ERR_PTR(-ENOMEM); + return info; } static void osc_session_fini(const struct lu_context *ctx, diff --git a/lustre/osc/osc_internal.h b/lustre/osc/osc_internal.h index e9311d8..31fe062 100644 --- a/lustre/osc/osc_internal.h +++ b/lustre/osc/osc_internal.h @@ -188,7 +188,7 @@ static inline struct osc_device *obd2osc_dev(const struct obd_device *d) int osc_dlm_lock_pageref(struct ldlm_lock *dlm); -extern cfs_mem_cache_t *osc_quota_kmem; +extern struct kmem_cache *osc_quota_kmem; struct osc_quota_info { /** linkage for quota hash table */ cfs_hlist_node_t oqi_hash; diff --git a/lustre/osc/osc_io.c b/lustre/osc/osc_io.c index c3d7f02..3535bbe 100644 --- a/lustre/osc/osc_io.c +++ b/lustre/osc/osc_io.c @@ -365,7 +365,7 @@ static int trunc_check_cb(const struct lu_env *env, struct cl_io *io, #ifdef __linux__ { - cfs_page_t *vmpage = cl_page_vmpage(env, page); + struct page *vmpage = cl_page_vmpage(env, page); if (PageLocked(vmpage)) CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n", ops, page->cp_index, @@ -834,7 +834,7 @@ int osc_req_init(const struct lu_env *env, struct cl_device *dev, struct osc_req *or; int result; - OBD_SLAB_ALLOC_PTR_GFP(or, osc_req_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(or, osc_req_kmem, __GFP_IO); if (or != NULL) { cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops); result = 0; diff --git a/lustre/osc/osc_lock.c b/lustre/osc/osc_lock.c index f32ba86..186e527 100644 --- a/lustre/osc/osc_lock.c +++ b/lustre/osc/osc_lock.c @@ -1636,14 +1636,14 @@ static const struct cl_lock_operations osc_lock_lockless_ops = { }; int osc_lock_init(const struct lu_env *env, - struct cl_object *obj, struct cl_lock *lock, - const struct cl_io *unused) + struct cl_object *obj, struct cl_lock *lock, + const struct cl_io *unused) { - struct osc_lock *clk; - int result; + struct osc_lock *clk; + int result; - OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, CFS_ALLOC_IO); - if (clk != NULL) { + OBD_SLAB_ALLOC_PTR_GFP(clk, osc_lock_kmem, __GFP_IO); + if (clk != NULL) { __u32 enqflags = lock->cll_descr.cld_enq_flags; osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo); @@ -1668,10 +1668,10 @@ int osc_lock_init(const struct lu_env *env, LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx\n", lock, clk, clk->ols_flags); - result = 0; - } else - result = -ENOMEM; - return result; + result = 0; + } else + result = -ENOMEM; + return result; } int 
osc_dlm_lock_pageref(struct ldlm_lock *dlm) diff --git a/lustre/osc/osc_object.c b/lustre/osc/osc_object.c index 2f02791..8d6eec6 100644 --- a/lustre/osc/osc_object.c +++ b/lustre/osc/osc_object.c @@ -264,7 +264,7 @@ struct lu_object *osc_object_alloc(const struct lu_env *env, struct osc_object *osc; struct lu_object *obj; - OBD_SLAB_ALLOC_PTR_GFP(osc, osc_object_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(osc, osc_object_kmem, __GFP_IO); if (osc != NULL) { obj = osc2lu(osc); lu_object_init(obj, NULL, dev); diff --git a/lustre/osc/osc_page.c b/lustre/osc/osc_page.c index 89c0aa0..0691f7b 100644 --- a/lustre/osc/osc_page.c +++ b/lustre/osc/osc_page.c @@ -508,14 +508,14 @@ static const struct cl_page_operations osc_page_ops = { }; int osc_page_init(const struct lu_env *env, struct cl_object *obj, - struct cl_page *page, cfs_page_t *vmpage) + struct cl_page *page, struct page *vmpage) { struct osc_object *osc = cl2osc(obj); struct osc_page *opg = cl_object_page_slice(obj, page); int result; opg->ops_from = 0; - opg->ops_to = CFS_PAGE_SIZE; + opg->ops_to = PAGE_CACHE_SIZE; result = osc_prep_async_page(osc, opg, vmpage, cl_offset(obj, page->cp_index)); @@ -593,9 +593,9 @@ static CFS_DECL_WAITQ(osc_lru_waitq); static cfs_atomic_t osc_lru_waiters = CFS_ATOMIC_INIT(0); /* LRU pages are freed in batch mode. OSC should at least free this * number of pages to avoid running out of LRU budget, and.. */ -static const int lru_shrink_min = 2 << (20 - CFS_PAGE_SHIFT); /* 2M */ +static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */ /* free this number at most otherwise it will take too long time to finsih. */ -static const int lru_shrink_max = 32 << (20 - CFS_PAGE_SHIFT); /* 32M */ +static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */ /* Check if we can free LRU slots from this OSC. If there exists LRU waiters, * we should free slots aggressively. In this way, slots are freed in a steady @@ -810,7 +810,7 @@ static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del) * cl_lru_shrinkers is to avoid recursive call in case * we're already in the context of osc_lru_shrink(). 
*/ if (cfs_atomic_read(&cli->cl_lru_shrinkers) == 0 && - !cfs_memory_pressure_get()) + !memory_pressure_get()) osc_lru_shrink(cli, osc_cache_too_much(cli)); cfs_waitq_signal(&osc_lru_waitq); } diff --git a/lustre/osc/osc_request.c b/lustre/osc/osc_request.c index 0497771..6b0ead0 100644 --- a/lustre/osc/osc_request.c +++ b/lustre/osc/osc_request.c @@ -855,7 +855,7 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa, oa->o_undirty = 0; } else { long max_in_flight = (cli->cl_max_pages_per_rpc << - CFS_PAGE_SHIFT)* + PAGE_CACHE_SHIFT) * (cli->cl_max_rpcs_in_flight + 1); oa->o_undirty = max(cli->cl_dirty_max, max_in_flight); } @@ -937,11 +937,11 @@ static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa) static int osc_shrink_grant(struct client_obd *cli) { __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) * - (cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT); + (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT); client_obd_list_lock(&cli->cl_loi_list_lock); if (cli->cl_avail_grant <= target_bytes) - target_bytes = cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT; + target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; client_obd_list_unlock(&cli->cl_loi_list_lock); return osc_shrink_grant_to_target(cli, target_bytes); @@ -957,8 +957,8 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes) /* Don't shrink if we are already above or below the desired limit * We don't want to shrink below a single RPC, as that will negatively * impact block allocation and long-term performance. */ - if (target_bytes < cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT) - target_bytes = cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT; + if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT) + target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; if (target_bytes >= cli->cl_avail_grant) { client_obd_list_unlock(&cli->cl_loi_list_lock); @@ -1005,7 +1005,7 @@ static int osc_should_shrink_grant(struct client_obd *client) /* Get the current RPC size directly, instead of going via: * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export) * Keep comment here so that it can be found by searching. */ - int brw_size = client->cl_max_pages_per_rpc << CFS_PAGE_SHIFT; + int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; if (client->cl_import->imp_state == LUSTRE_IMP_FULL && client->cl_avail_grant > brw_size) @@ -1079,7 +1079,7 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd) } /* determine the appropriate chunk size used by osc_extent. */ - cli->cl_chunkbits = max_t(int, CFS_PAGE_SHIFT, ocd->ocd_blocksize); + cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize); client_obd_list_unlock(&cli->cl_loi_list_lock); CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld." 
@@ -1105,29 +1105,29 @@ static void handle_short_read(int nob_read, obd_count page_count, while (nob_read > 0) { LASSERT (page_count > 0); - if (pga[i]->count > nob_read) { - /* EOF inside this page */ - ptr = cfs_kmap(pga[i]->pg) + - (pga[i]->off & ~CFS_PAGE_MASK); - memset(ptr + nob_read, 0, pga[i]->count - nob_read); - cfs_kunmap(pga[i]->pg); - page_count--; - i++; - break; - } + if (pga[i]->count > nob_read) { + /* EOF inside this page */ + ptr = kmap(pga[i]->pg) + + (pga[i]->off & ~CFS_PAGE_MASK); + memset(ptr + nob_read, 0, pga[i]->count - nob_read); + kunmap(pga[i]->pg); + page_count--; + i++; + break; + } nob_read -= pga[i]->count; page_count--; i++; } - /* zero remaining pages */ - while (page_count-- > 0) { - ptr = cfs_kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK); - memset(ptr, 0, pga[i]->count); - cfs_kunmap(pga[i]->pg); - i++; - } + /* zero remaining pages */ + while (page_count-- > 0) { + ptr = kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK); + memset(ptr, 0, pga[i]->count); + kunmap(pga[i]->pg); + i++; + } } static int check_write_rcs(struct ptlrpc_request *req, @@ -1212,10 +1212,10 @@ static obd_count osc_checksum_bulk(int nob, obd_count pg_count, * simulate an OST->client data error */ if (i == 0 && opc == OST_READ && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) { - unsigned char *ptr = cfs_kmap(pga[i]->pg); + unsigned char *ptr = kmap(pga[i]->pg); int off = pga[i]->off & ~CFS_PAGE_MASK; memcpy(ptr + off, "bad1", min(4, nob)); - cfs_kunmap(pga[i]->pg); + kunmap(pga[i]->pg); } cfs_crypto_hash_update_page(hdesc, pga[i]->pg, pga[i]->off & ~CFS_PAGE_MASK, @@ -1333,13 +1333,13 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa, LASSERT(pg->count > 0); /* make sure there is no gap in the middle of page array */ - LASSERTF(page_count == 1 || - (ergo(i == 0, poff + pg->count == CFS_PAGE_SIZE) && - ergo(i > 0 && i < page_count - 1, - poff == 0 && pg->count == CFS_PAGE_SIZE) && - ergo(i == page_count - 1, poff == 0)), - "i: %d/%d pg: %p off: "LPU64", count: %u\n", - i, page_count, pg, pg->off, pg->count); + LASSERTF(page_count == 1 || + (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) && + ergo(i > 0 && i < page_count - 1, + poff == 0 && pg->count == PAGE_CACHE_SIZE) && + ergo(i == page_count - 1, poff == 0)), + "i: %d/%d pg: %p off: "LPU64", count: %u\n", + i, page_count, pg, pg->off, pg->count); #ifdef __linux__ LASSERTF(i == 0 || pg->off > pg_prev->off, "i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64 @@ -1840,7 +1840,7 @@ static obd_count max_unfragmented_pages(struct brw_page **pg, obd_count pages) if (pages == 0) /* that's all */ return count; - if (offset + pg[i]->count < CFS_PAGE_SIZE) + if (offset + pg[i]->count < PAGE_CACHE_SIZE) return count; /* doesn't end on page boundary */ i++; @@ -2099,7 +2099,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, oap->oap_count; else LASSERT(oap->oap_page_off + oap->oap_count == - CFS_PAGE_SIZE); + PAGE_CACHE_SIZE); } } @@ -2133,7 +2133,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, pga[i] = &oap->oap_brw_page; pga[i]->off = oap->oap_obj_off + oap->oap_page_off; CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n", - pga[i]->pg, cfs_page_index(oap->oap_page), oap, + pga[i]->pg, page_index(oap->oap_page), oap, pga[i]->flag); i++; cl_req_page_add(env, clerq, page); @@ -2201,7 +2201,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, tmp->oap_request = ptlrpc_request_addref(req); client_obd_list_lock(&cli->cl_loi_list_lock); - 
starting_offset >>= CFS_PAGE_SHIFT; + starting_offset >>= PAGE_CACHE_SHIFT; if (cmd == OBD_BRW_READ) { cli->cl_r_in_flight++; lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count); @@ -2891,7 +2891,7 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump) /* we only need the header part from user space to get lmm_magic and * lmm_stripe_count, (the header part is common to v1 and v3) */ lum_size = sizeof(struct lov_user_md_v1); - if (cfs_copy_from_user(&lum, lump, lum_size)) + if (copy_from_user(&lum, lump, lum_size)) RETURN(-EFAULT); if ((lum.lmm_magic != LOV_USER_MAGIC_V1) && @@ -2923,15 +2923,15 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump) } lumk->lmm_oi = lsm->lsm_oi; - lumk->lmm_stripe_count = 1; + lumk->lmm_stripe_count = 1; - if (cfs_copy_to_user(lump, lumk, lum_size)) - rc = -EFAULT; + if (copy_to_user(lump, lumk, lum_size)) + rc = -EFAULT; - if (lumk != &lum) - OBD_FREE(lumk, lum_size); + if (lumk != &lum) + OBD_FREE(lumk, lum_size); - RETURN(rc); + RETURN(rc); } @@ -2981,7 +2981,7 @@ static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len, memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid)); - err = cfs_copy_to_user((void *)uarg, buf, len); + err = copy_to_user((void *)uarg, buf, len); if (err) err = -EFAULT; obd_ioctl_freedata(buf, len); @@ -3089,12 +3089,12 @@ static int osc_get_info(const struct lu_env *env, struct obd_export *exp, CFS_PAGE_MASK; if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <= - fm_key->fiemap.fm_start + CFS_PAGE_SIZE - 1) + fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1) policy.l_extent.end = OBD_OBJECT_EOF; else policy.l_extent.end = (fm_key->fiemap.fm_start + fm_key->fiemap.fm_length + - CFS_PAGE_SIZE - 1) & CFS_PAGE_MASK; + PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK; ostid_build_res_name(&fm_key->oa.o_oi, &res_id); mode = ldlm_lock_match(exp->exp_obd->obd_namespace, diff --git a/lustre/osd-ldiskfs/osd_handler.c b/lustre/osd-ldiskfs/osd_handler.c index af533f9..f5e3edf 100644 --- a/lustre/osd-ldiskfs/osd_handler.c +++ b/lustre/osd-ldiskfs/osd_handler.c @@ -770,7 +770,7 @@ static struct thandle *osd_trans_create(const struct lu_env *env, LASSERT(cfs_atomic_read(&iobuf->dr_numreqs) == 0); th = ERR_PTR(-ENOMEM); - OBD_ALLOC_GFP(oh, sizeof *oh, CFS_ALLOC_IO); + OBD_ALLOC_GFP(oh, sizeof *oh, __GFP_IO); if (oh != NULL) { oh->ot_quota_trans = &oti->oti_quota_trans; memset(oh->ot_quota_trans, 0, sizeof(*oh->ot_quota_trans)); @@ -5368,7 +5368,7 @@ static int osd_mount(const struct lu_env *env, RETURN(-ENOTSUPP); } - OBD_PAGE_ALLOC(__page, CFS_ALLOC_STD); + OBD_PAGE_ALLOC(__page, GFP_IOFS); if (__page == NULL) GOTO(out, rc = -ENOMEM); @@ -5378,7 +5378,7 @@ static int osd_mount(const struct lu_env *env, if (str) lmd_flags = simple_strtoul(str + 1, NULL, 0); opts = lustre_cfg_string(cfg, 3); - page = (unsigned long)cfs_page_address(__page); + page = (unsigned long)page_address(__page); options = (char *)page; *options = '\0'; if (opts == NULL) @@ -5389,7 +5389,7 @@ static int osd_mount(const struct lu_env *env, /* Glom up mount options */ if (*options != '\0') strcat(options, ","); - strlcat(options, "no_mbcache", CFS_PAGE_SIZE); + strlcat(options, "no_mbcache", PAGE_CACHE_SIZE); type = get_fs_type("ldiskfs"); if (!type) { diff --git a/lustre/osd-ldiskfs/osd_internal.h b/lustre/osd-ldiskfs/osd_internal.h index 2ee8cc5..6733819 100644 --- a/lustre/osd-ldiskfs/osd_internal.h +++ b/lustre/osd-ldiskfs/osd_internal.h @@ -406,7 +406,7 @@ struct osd_it_ea_dirent { * there would be one ext3 
readdir for every mdd readdir page. */ -#define OSD_IT_EA_BUFSIZE (CFS_PAGE_SIZE + CFS_PAGE_SIZE/4) +#define OSD_IT_EA_BUFSIZE (PAGE_CACHE_SIZE + PAGE_CACHE_SIZE/4) /** * This is iterator's in-memory data structure in interoperability @@ -457,7 +457,7 @@ struct osd_it_quota { cfs_list_t oiq_list; }; -#define MAX_BLOCKS_PER_PAGE (CFS_PAGE_SIZE / 512) +#define MAX_BLOCKS_PER_PAGE (PAGE_CACHE_SIZE / 512) struct osd_iobuf { cfs_waitq_t dr_wait; diff --git a/lustre/osd-ldiskfs/osd_io.c b/lustre/osd-ldiskfs/osd_io.c index c9eb4eb..529c8f9 100644 --- a/lustre/osd-ldiskfs/osd_io.c +++ b/lustre/osd-ldiskfs/osd_io.c @@ -113,7 +113,7 @@ static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf, iobuf->dr_rw = rw; iobuf->dr_init_at = line; - blocks = pages * (CFS_PAGE_SIZE >> osd_sb(d)->s_blocksize_bits); + blocks = pages * (PAGE_CACHE_SIZE >> osd_sb(d)->s_blocksize_bits); if (iobuf->dr_bl_buf.lb_len >= blocks * sizeof(iobuf->dr_blocks[0])) { LASSERT(iobuf->dr_pg_buf.lb_len >= pages * sizeof(iobuf->dr_pages[0])); @@ -128,7 +128,7 @@ static int __osd_init_iobuf(struct osd_device *d, struct osd_iobuf *iobuf, CDEBUG(D_OTHER, "realloc %u for %u (%u) pages\n", (unsigned)(pages * sizeof(iobuf->dr_pages[0])), i, pages); pages = i; - blocks = pages * (CFS_PAGE_SIZE >> osd_sb(d)->s_blocksize_bits); + blocks = pages * (PAGE_CACHE_SIZE >> osd_sb(d)->s_blocksize_bits); iobuf->dr_max_pages = 0; CDEBUG(D_OTHER, "realloc %u for %u blocks\n", (unsigned)(blocks * sizeof(iobuf->dr_blocks[0])), blocks); @@ -303,7 +303,7 @@ static int can_be_merged(struct bio *bio, sector_t sector) static int osd_do_bio(struct osd_device *osd, struct inode *inode, struct osd_iobuf *iobuf) { - int blocks_per_page = CFS_PAGE_SIZE >> inode->i_blkbits; + int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits; struct page **pages = iobuf->dr_pages; int npages = iobuf->dr_npages; unsigned long *blocks = iobuf->dr_blocks; @@ -444,8 +444,8 @@ static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages, *nrpages = 0; while (len > 0) { - int poff = offset & (CFS_PAGE_SIZE - 1); - int plen = CFS_PAGE_SIZE - poff; + int poff = offset & (PAGE_CACHE_SIZE - 1); + int plen = PAGE_CACHE_SIZE - poff; if (plen > len) plen = len; @@ -476,7 +476,7 @@ struct page *osd_get_page(struct dt_object *dt, loff_t offset, int rw) LASSERT(inode); - page = find_or_create_page(inode->i_mapping, offset >> CFS_PAGE_SHIFT, + page = find_or_create_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT, GFP_NOFS | __GFP_HIGHMEM); if (unlikely(page == NULL)) lprocfs_counter_add(d->od_stats, LPROC_OSD_NO_PAGE, 1); @@ -590,7 +590,7 @@ static int osd_write_prep(const struct lu_env *env, struct dt_object *dt, RETURN(rc); isize = i_size_read(inode); - maxidx = ((isize + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT) - 1; + maxidx = ((isize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - 1; if (osd->od_writethrough_cache) cache = 1; @@ -611,7 +611,7 @@ static int osd_write_prep(const struct lu_env *env, struct dt_object *dt, */ ClearPageUptodate(lnb[i].page); - if (lnb[i].len == CFS_PAGE_SIZE) + if (lnb[i].len == PAGE_CACHE_SIZE) continue; if (maxidx >= lnb[i].page->index) { @@ -626,7 +626,7 @@ static int osd_write_prep(const struct lu_env *env, struct dt_object *dt, off = (lnb[i].lnb_page_offset + lnb[i].len) & ~CFS_PAGE_MASK; if (off) - memset(p + off, 0, CFS_PAGE_SIZE - off); + memset(p + off, 0, PAGE_CACHE_SIZE - off); kunmap(lnb[i].page); } } @@ -704,7 +704,7 @@ static int osd_declare_write_commit(const struct lu_env *env, extents++; if (!osd_is_mapped(inode, 
lnb[i].lnb_file_offset)) - quota_space += CFS_PAGE_SIZE; + quota_space += PAGE_CACHE_SIZE; /* ignore quota for the whole request if any page is from * client cache or written by root. diff --git a/lustre/osd-ldiskfs/osd_lproc.c b/lustre/osd-ldiskfs/osd_lproc.c index 87a8c08..251a97c 100644 --- a/lustre/osd-ldiskfs/osd_lproc.c +++ b/lustre/osd-ldiskfs/osd_lproc.c @@ -63,7 +63,7 @@ void osd_brw_stats_update(struct osd_device *osd, struct osd_iobuf *iobuf) if (unlikely(nr_pages == 0)) return; - blocks_per_page = CFS_PAGE_SIZE >> osd_sb(osd)->s_blocksize_bits; + blocks_per_page = PAGE_CACHE_SIZE >> osd_sb(osd)->s_blocksize_bits; lprocfs_oh_tally_log2(&s->hist[BRW_R_PAGES+rw], nr_pages); diff --git a/lustre/osd-ldiskfs/osd_quota_fmt.c b/lustre/osd-ldiskfs/osd_quota_fmt.c index 542f7618..6969ae5 100644 --- a/lustre/osd-ldiskfs/osd_quota_fmt.c +++ b/lustre/osd-ldiskfs/osd_quota_fmt.c @@ -40,7 +40,7 @@ static const union static inline dqbuf_t getdqbuf(void) { - dqbuf_t buf = cfs_alloc(LUSTRE_DQBLKSIZE, CFS_ALLOC_IO); + dqbuf_t buf = kmalloc(LUSTRE_DQBLKSIZE, __GFP_IO); if (!buf) CWARN("Not enough memory for quota buffers.\n"); return buf; @@ -48,7 +48,7 @@ static inline dqbuf_t getdqbuf(void) static inline void freedqbuf(dqbuf_t buf) { - cfs_free(buf); + kfree(buf); } /** diff --git a/lustre/osd-zfs/osd_handler.c b/lustre/osd-zfs/osd_handler.c index 35bbed0..df85bb54 100644 --- a/lustre/osd-zfs/osd_handler.c +++ b/lustre/osd-zfs/osd_handler.c @@ -79,7 +79,7 @@ struct lu_context_key osd_key; static char *root_tag = "osd_mount, rootdb"; /* Slab for OSD object allocation */ -cfs_mem_cache_t *osd_object_kmem; +struct kmem_cache *osd_object_kmem; static struct lu_kmem_descr osd_caches[] = { { diff --git a/lustre/osd-zfs/osd_internal.h b/lustre/osd-zfs/osd_internal.h index 15f488c..201077d 100644 --- a/lustre/osd-zfs/osd_internal.h +++ b/lustre/osd-zfs/osd_internal.h @@ -49,6 +49,15 @@ #include #include #include + +#define _SPL_KMEM_H +#include +#define kmem_zalloc(a, b) kzalloc(a, b) +#define kmem_free(ptr, sz) ((void)(sz), kfree(ptr)) +#ifndef KM_SLEEP +#define KM_SLEEP GFP_KERNEL +#endif + #include #include diff --git a/lustre/osd-zfs/osd_io.c b/lustre/osd-zfs/osd_io.c index e315fc1..c12e44b 100644 --- a/lustre/osd-zfs/osd_io.c +++ b/lustre/osd-zfs/osd_io.c @@ -242,16 +242,12 @@ static int osd_bufs_put(const struct lu_env *env, struct dt_object *dt, return 0; } -static struct page *kmem_to_page(void *addr) +static inline struct page *kmem_to_page(void *addr) { - struct page *page; - - if (kmem_virt(addr)) - page = vmalloc_to_page(addr); + if (is_vmalloc_addr(addr)) + return vmalloc_to_page(addr); else - page = virt_to_page(addr); - - return page; + return virt_to_page(addr); } static int osd_bufs_get_read(const struct lu_env *env, struct osd_object *obj, @@ -292,8 +288,8 @@ static int osd_bufs_get_read(const struct lu_env *env, struct osd_object *obj, dbf = (void *) ((unsigned long)dbp[i] | 1); while (tocpy > 0) { - thispage = CFS_PAGE_SIZE; - thispage -= bufoff & (CFS_PAGE_SIZE - 1); + thispage = PAGE_CACHE_SIZE; + thispage -= bufoff & (PAGE_CACHE_SIZE - 1); thispage = min(tocpy, thispage); lnb->rc = 0; @@ -366,7 +362,7 @@ static int osd_bufs_get_write(const struct lu_env *env, struct osd_object *obj, /* go over pages arcbuf contains, put them as * local niobufs for ptlrpc's bulks */ while (sz_in_block > 0) { - plen = min_t(int, sz_in_block, CFS_PAGE_SIZE); + plen = min_t(int, sz_in_block, PAGE_CACHE_SIZE); lnb[i].lnb_file_offset = off; lnb[i].lnb_page_offset = 0; @@ -400,7 +396,7 @@ static int 
osd_bufs_get_write(const struct lu_env *env, struct osd_object *obj, /* can't use zerocopy, allocate temp. buffers */ while (sz_in_block > 0) { - plen = min_t(int, sz_in_block, CFS_PAGE_SIZE); + plen = min_t(int, sz_in_block, PAGE_CACHE_SIZE); lnb[i].lnb_file_offset = off; lnb[i].lnb_page_offset = 0; diff --git a/lustre/osd-zfs/osd_lproc.c b/lustre/osd-zfs/osd_lproc.c index f85accf..0854ce6 100644 --- a/lustre/osd-zfs/osd_lproc.c +++ b/lustre/osd-zfs/osd_lproc.c @@ -48,7 +48,6 @@ #include #include -#include "udmu.h" #include "osd_internal.h" #ifdef LPROCFS diff --git a/lustre/osd-zfs/osd_object.c b/lustre/osd-zfs/osd_object.c index 85732f1..b2e2fa1 100644 --- a/lustre/osd-zfs/osd_object.c +++ b/lustre/osd-zfs/osd_object.c @@ -79,7 +79,7 @@ static struct lu_object_operations osd_lu_obj_ops; extern struct dt_body_operations osd_body_ops; static struct dt_object_operations osd_obj_otable_it_ops; -extern cfs_mem_cache_t *osd_object_kmem; +extern struct kmem_cache *osd_object_kmem; static void osd_object_sa_fini(struct osd_object *obj) @@ -290,7 +290,7 @@ struct lu_object *osd_object_alloc(const struct lu_env *env, { struct osd_object *mo; - OBD_SLAB_ALLOC_PTR_GFP(mo, osd_object_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(mo, osd_object_kmem, __GFP_IO); if (mo != NULL) { struct lu_object *l; diff --git a/lustre/osd-zfs/osd_quota.c b/lustre/osd-zfs/osd_quota.c index 7faa287..b51be3b 100644 --- a/lustre/osd-zfs/osd_quota.c +++ b/lustre/osd-zfs/osd_quota.c @@ -29,7 +29,6 @@ #include #include -#include "udmu.h" #include "osd_internal.h" /** diff --git a/lustre/osd-zfs/udmu.h b/lustre/osd-zfs/udmu.h index 5708bde..1939147 100644 --- a/lustre/osd-zfs/udmu.h +++ b/lustre/osd-zfs/udmu.h @@ -46,7 +46,6 @@ #define _DMU_H #include -#include #include #include diff --git a/lustre/osp/osp_dev.c b/lustre/osp/osp_dev.c index cc4a1ae..aa54f94 100644 --- a/lustre/osp/osp_dev.c +++ b/lustre/osp/osp_dev.c @@ -55,7 +55,7 @@ #include "osp_internal.h" /* Slab for OSP object allocation */ -cfs_mem_cache_t *osp_object_kmem; +struct kmem_cache *osp_object_kmem; static struct lu_kmem_descr osp_caches[] = { { @@ -76,7 +76,7 @@ struct lu_object *osp_object_alloc(const struct lu_env *env, struct osp_object *o; struct lu_object *l; - OBD_SLAB_ALLOC_PTR_GFP(o, osp_object_kmem, CFS_ALLOC_IO); + OBD_SLAB_ALLOC_PTR_GFP(o, osp_object_kmem, __GFP_IO); if (o != NULL) { l = &o->opo_obj.do_lu; diff --git a/lustre/osp/osp_internal.h b/lustre/osp/osp_internal.h index 953987f..847a100 100644 --- a/lustre/osp/osp_internal.h +++ b/lustre/osp/osp_internal.h @@ -181,7 +181,7 @@ struct osp_device { cfs_proc_dir_entry_t *opd_symlink; }; -extern cfs_mem_cache_t *osp_object_kmem; +extern struct kmem_cache *osp_object_kmem; /* this is a top object */ struct osp_object { diff --git a/lustre/osp/osp_md_object.c b/lustre/osp/osp_md_object.c index c3805f9..776877a 100644 --- a/lustre/osp/osp_md_object.c +++ b/lustre/osp/osp_md_object.c @@ -659,7 +659,7 @@ static int osp_md_xattr_get(const struct lu_env *env, struct dt_object *dt, if (size < 0) GOTO(out, rc = size); - LASSERT(size > 0 && size < CFS_PAGE_SIZE); + LASSERT(size > 0 && size < PAGE_CACHE_SIZE); LASSERT(ea_buf != NULL); rc = size; diff --git a/lustre/ost/ost_handler.c b/lustre/ost/ost_handler.c index a153b13..fc7d74d 100644 --- a/lustre/ost/ost_handler.c +++ b/lustre/ost/ost_handler.c @@ -1064,7 +1064,7 @@ static int ost_brw_write(struct ptlrpc_request *req, struct obd_trans_info *oti) if ((remote_nb[0].flags & OBD_BRW_MEMALLOC) && (exp->exp_connection->c_peer.nid == 
diff --git a/lustre/ost/ost_handler.c b/lustre/ost/ost_handler.c
index a153b13..fc7d74d 100644
--- a/lustre/ost/ost_handler.c
+++ b/lustre/ost/ost_handler.c
@@ -1064,7 +1064,7 @@ static int ost_brw_write(struct ptlrpc_request *req, struct obd_trans_info *oti)
 	if ((remote_nb[0].flags & OBD_BRW_MEMALLOC) &&
 	    (exp->exp_connection->c_peer.nid ==
 	     exp->exp_connection->c_self))
-		cfs_memory_pressure_set();
+		memory_pressure_set();
 
 	if (body->oa.o_valid & OBD_MD_FLOSSCAPA) {
 		capa = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1);
@@ -1247,7 +1247,7 @@ out:
 			      obd_uuid2str(&exp->exp_client_uuid),
 			      obd_export_nid2str(exp), rc);
 	}
-	cfs_memory_pressure_clr();
+	memory_pressure_clr();
 	RETURN(rc);
 }
 
@@ -2902,7 +2902,7 @@ static int __init ost_init(void)
 	int rc;
 	ENTRY;
 
-	ost_page_to_corrupt = cfs_alloc_page(CFS_ALLOC_STD);
+	ost_page_to_corrupt = alloc_page(GFP_IOFS);
 
 	lprocfs_ost_init_vars(&lvars);
 	rc = class_register_type(&ost_obd_ops, NULL, lvars.module_vars,
diff --git a/lustre/ost/ost_internal.h b/lustre/ost/ost_internal.h
index 6a96053..d07b8ea 100644
--- a/lustre/ost/ost_internal.h
+++ b/lustre/ost/ost_internal.h
@@ -43,7 +43,7 @@
  * tunables for per-thread page pool (bug 5137)
  */
 #define OST_THREAD_POOL_SIZE  PTLRPC_MAX_BRW_PAGES  /* pool size in pages */
-#define OST_THREAD_POOL_GFP   CFS_ALLOC_HIGHUSER    /* GFP mask for pool pages */
+#define OST_THREAD_POOL_GFP   GFP_HIGHUSER          /* GFP mask for pool pages */
 
 struct page;
 struct niobuf_local;
diff --git a/lustre/ptlrpc/client.c b/lustre/ptlrpc/client.c
index a22fe8d..4fe79db 100644
--- a/lustre/ptlrpc/client.c
+++ b/lustre/ptlrpc/client.c
@@ -162,26 +162,26 @@ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
 }
 EXPORT_SYMBOL(ptlrpc_prep_bulk_imp);
 
-/**
+/*
  * Add a page \a page to the bulk descriptor \a desc.
  * Data to transfer in the page starts at offset \a pageoffset and
 * amount of data to transfer from the page is \a len
 */
 void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
-			     cfs_page_t *page, int pageoffset, int len, int pin)
+			     struct page *page, int pageoffset, int len, int pin)
 {
-        LASSERT(desc->bd_iov_count < desc->bd_max_iov);
-        LASSERT(page != NULL);
-        LASSERT(pageoffset >= 0);
-        LASSERT(len > 0);
-        LASSERT(pageoffset + len <= CFS_PAGE_SIZE);
+	LASSERT(desc->bd_iov_count < desc->bd_max_iov);
+	LASSERT(page != NULL);
+	LASSERT(pageoffset >= 0);
+	LASSERT(len > 0);
+	LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);
 
-        desc->bd_nob += len;
+	desc->bd_nob += len;
 
 	if (pin)
-		cfs_page_pin(page);
+		page_cache_get(page);
 
-        ptlrpc_add_bulk_page(desc, page, pageoffset, len);
+	ptlrpc_add_bulk_page(desc, page, pageoffset, len);
 }
 EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
 
@@ -208,7 +208,7 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
 
 	if (unpin) {
 		for (i = 0; i < desc->bd_iov_count ; i++)
-			cfs_page_unpin(desc->bd_iov[i].kiov_page);
+			page_cache_release(desc->bd_iov[i].kiov_page);
 	}
 
 	OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
diff --git a/lustre/ptlrpc/events.c b/lustre/ptlrpc/events.c
index 3cd88cc..7fea3cc 100644
--- a/lustre/ptlrpc/events.c
+++ b/lustre/ptlrpc/events.c
@@ -319,7 +319,7 @@ void request_in_callback(lnet_event_t *ev)
 			/* We moaned above already... */
 			return;
 		}
-		OBD_ALLOC_GFP(req, sizeof(*req), CFS_ALLOC_ATOMIC_TRY);
+		OBD_ALLOC_GFP(req, sizeof(*req), ALLOC_ATOMIC_TRY);
 		if (req == NULL) {
 			CERROR("Can't allocate incoming request descriptor: "
 			       "Dropping %s RPC from %s\n",
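The cfs_page_pin()/cfs_page_unpin() calls above become page_cache_get()/page_cache_release(), i.e. plain page reference counting: every page pinned when it is added to a bulk descriptor must be released exactly once when the descriptor is freed. A toy userspace model of that pairing (all fake_* names are hypothetical):

	#include <assert.h>
	#include <stdio.h>

	struct fake_page {
		int refcount;	/* stands in for struct page's _count */
	};

	static void fake_page_cache_get(struct fake_page *p)
	{
		p->refcount++;	/* pin: page cannot go away while counted */
	}

	static void fake_page_cache_release(struct fake_page *p)
	{
		assert(p->refcount > 0);
		p->refcount--;	/* unpin: drop the descriptor's reference */
	}

	int main(void)
	{
		struct fake_page page = { .refcount = 1 }; /* owner's ref */

		fake_page_cache_get(&page);  /* prep_bulk_page(..., pin=1) */
		/* ... bulk transfer runs; the page is guaranteed to stay ... */
		fake_page_cache_release(&page);	/* free_bulk(..., unpin=1) */

		printf("refcount back to %d\n", page.refcount);
		return 0;
	}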
diff --git a/lustre/ptlrpc/gss/gss_cli_upcall.c b/lustre/ptlrpc/gss/gss_cli_upcall.c
index b31ff17..d6ecf7f 100644
--- a/lustre/ptlrpc/gss/gss_cli_upcall.c
+++ b/lustre/ptlrpc/gss/gss_cli_upcall.c
@@ -135,7 +135,7 @@ int ctx_init_pack_request(struct obd_import *imp,
 	/* 4. now the token */
 	LASSERT(size >= (sizeof(__u32) + token_size));
 	*p++ = cpu_to_le32(((__u32) token_size));
-	if (cfs_copy_from_user(p, token, token_size)) {
+	if (copy_from_user(p, token, token_size)) {
 		CERROR("can't copy token\n");
 		return -EFAULT;
 	}
@@ -179,43 +179,43 @@ int ctx_init_parse_reply(struct lustre_msg *msg, int swabbed,
 	status = 0;
 	effective = 0;
 
-	if (cfs_copy_to_user(outbuf, &status, 4))
-		return -EFAULT;
-	outbuf += 4;
-	if (cfs_copy_to_user(outbuf, &ghdr->gh_major, 4))
-		return -EFAULT;
-	outbuf += 4;
-	if (cfs_copy_to_user(outbuf, &ghdr->gh_minor, 4))
-		return -EFAULT;
-	outbuf += 4;
-	if (cfs_copy_to_user(outbuf, &ghdr->gh_seqwin, 4))
-		return -EFAULT;
-	outbuf += 4;
-	effective += 4 * 4;
-
-	/* handle */
-	obj_len = ghdr->gh_handle.len;
-	round_len = (obj_len + 3) & ~ 3;
-	if (cfs_copy_to_user(outbuf, &obj_len, 4))
-		return -EFAULT;
-	outbuf += 4;
-	if (cfs_copy_to_user(outbuf, (char *) ghdr->gh_handle.data, round_len))
-		return -EFAULT;
-	outbuf += round_len;
-	effective += 4 + round_len;
-
-	/* out token */
-	obj_len = msg->lm_buflens[2];
-	round_len = (obj_len + 3) & ~ 3;
-	if (cfs_copy_to_user(outbuf, &obj_len, 4))
-		return -EFAULT;
-	outbuf += 4;
-	if (cfs_copy_to_user(outbuf, lustre_msg_buf(msg, 2, 0), round_len))
-		return -EFAULT;
-	outbuf += round_len;
-	effective += 4 + round_len;
-
-	return effective;
+	if (copy_to_user(outbuf, &status, 4))
+		return -EFAULT;
+	outbuf += 4;
+	if (copy_to_user(outbuf, &ghdr->gh_major, 4))
+		return -EFAULT;
+	outbuf += 4;
+	if (copy_to_user(outbuf, &ghdr->gh_minor, 4))
+		return -EFAULT;
+	outbuf += 4;
+	if (copy_to_user(outbuf, &ghdr->gh_seqwin, 4))
+		return -EFAULT;
+	outbuf += 4;
+	effective += 4 * 4;
+
+	/* handle */
+	obj_len = ghdr->gh_handle.len;
+	round_len = (obj_len + 3) & ~3;
+	if (copy_to_user(outbuf, &obj_len, 4))
+		return -EFAULT;
+	outbuf += 4;
+	if (copy_to_user(outbuf, (char *) ghdr->gh_handle.data, round_len))
+		return -EFAULT;
+	outbuf += round_len;
+	effective += 4 + round_len;
+
+	/* out token */
+	obj_len = msg->lm_buflens[2];
+	round_len = (obj_len + 3) & ~3;
+	if (copy_to_user(outbuf, &obj_len, 4))
+		return -EFAULT;
+	outbuf += 4;
+	if (copy_to_user(outbuf, lustre_msg_buf(msg, 2, 0), round_len))
+		return -EFAULT;
+	outbuf += round_len;
+	effective += 4 + round_len;
+
+	return effective;
 }
 
 /* XXX move to where lgssd could see */
@@ -249,7 +249,7 @@ int gss_do_ctx_init_rpc(__user char *buffer, unsigned long count)
 		       "version\n", count, (unsigned long) sizeof(param));
 		RETURN(-EINVAL);
 	}
 
-	if (cfs_copy_from_user(&param, buffer, sizeof(param))) {
+	if (copy_from_user(&param, buffer, sizeof(param))) {
 		CERROR("can't copy data from lgssd\n");
 		RETURN(-EFAULT);
 	}
@@ -365,7 +365,7 @@ int gss_do_ctx_init_rpc(__user char *buffer, unsigned long count)
 	param.reply_length = lsize;
 
 out_copy:
-	if (cfs_copy_to_user(buffer, &param, sizeof(param)))
+	if (copy_to_user(buffer, &param, sizeof(param)))
 		rc = -EFAULT;
 	else
 		rc = 0;
diff --git a/lustre/ptlrpc/gss/gss_krb5_mech.c b/lustre/ptlrpc/gss/gss_krb5_mech.c
index 4a6430c..181549f 100644
--- a/lustre/ptlrpc/gss/gss_krb5_mech.c
+++ b/lustre/ptlrpc/gss/gss_krb5_mech.c
@@ -1156,9 +1156,9 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
 		}
 
 		if (desc->bd_iov[i].kiov_len % blocksize != 0) {
-			memcpy(cfs_page_address(desc->bd_iov[i].kiov_page) +
+			memcpy(page_address(desc->bd_iov[i].kiov_page) +
 			       desc->bd_iov[i].kiov_offset,
-			       cfs_page_address(desc->bd_enc_iov[i].kiov_page) +
+			       page_address(desc->bd_enc_iov[i].kiov_page) +
 			       desc->bd_iov[i].kiov_offset,
 			       desc->bd_iov[i].kiov_len);
 		}
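ctx_init_parse_reply() above serializes each variable-length object as a 4-byte length followed by the data rounded up to the next 4-byte boundary with (len + 3) & ~3. A worked sketch of that layout (buffer names and contents are hypothetical; unlike the kernel code, this copies only the payload bytes and leaves the padding zeroed):

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	/* round a byte count up to the next multiple of 4 */
	static uint32_t round4(uint32_t len)
	{
		return (len + 3) & ~3u;
	}

	int main(void)
	{
		unsigned char outbuf[64] = { 0 };
		unsigned char *p = outbuf;
		const char handle[] = "\x01\x02\x03\x04\x05"; /* 5 bytes */
		uint32_t obj_len = sizeof(handle) - 1;
		uint32_t round_len = round4(obj_len);	/* 5 -> 8 */

		memcpy(p, &obj_len, 4);	/* the 4-byte length word */
		p += 4;
		memcpy(p, handle, obj_len);	/* payload, pad stays zero */
		p += round_len;

		/* prints: wrote 5 payload bytes, consumed 12 bytes total */
		printf("wrote %u payload bytes, consumed %td bytes total\n",
		       obj_len, p - outbuf);
		return 0;
	}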
diff --git a/lustre/ptlrpc/gss/gss_pipefs.c b/lustre/ptlrpc/gss/gss_pipefs.c
index 13f852f..4e704be 100644
--- a/lustre/ptlrpc/gss/gss_pipefs.c
+++ b/lustre/ptlrpc/gss/gss_pipefs.c
@@ -817,7 +817,7 @@ ssize_t gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
 	if (mlen > buflen)
 		mlen = buflen;
 
-	left = cfs_copy_to_user(dst, data, mlen);
+	left = copy_to_user(dst, data, mlen);
 	if (left < 0) {
 		msg->errno = left;
 		RETURN(left);
@@ -848,7 +848,7 @@ ssize_t gss_pipe_downcall(struct file *filp, const char *src, size_t mlen)
 	if (!buf)
 		RETURN(-ENOMEM);
 
-	if (cfs_copy_from_user(buf, src, mlen)) {
+	if (copy_from_user(buf, src, mlen)) {
 		CERROR("failed copy user space data\n");
 		GOTO(out_free, rc = -EFAULT);
 	}
diff --git a/lustre/ptlrpc/import.c b/lustre/ptlrpc/import.c
index 0eb47d9..62792ab 100644
--- a/lustre/ptlrpc/import.c
+++ b/lustre/ptlrpc/import.c
@@ -1089,7 +1089,7 @@ finish:
 
 		if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
 			cli->cl_max_pages_per_rpc =
-				min(ocd->ocd_brw_size >> CFS_PAGE_SHIFT,
+				min(ocd->ocd_brw_size >> PAGE_CACHE_SHIFT,
 				    cli->cl_max_pages_per_rpc);
 		else if (imp->imp_connect_op == MDS_CONNECT ||
 			 imp->imp_connect_op == MGS_CONNECT)
diff --git a/lustre/ptlrpc/lproc_ptlrpc.c b/lustre/ptlrpc/lproc_ptlrpc.c
index 93e1758..a3f16d8 100644
--- a/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/lustre/ptlrpc/lproc_ptlrpc.c
@@ -308,8 +308,9 @@ ptlrpc_lprocfs_write_req_history_max(struct file *file, const char *buffer,
 	/* This sanity check is more of an insanity check; we can still
 	 * hose a kernel by allowing the request history to grow too
 	 * far. */
-	bufpages = (svc->srv_buf_size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
-	if (val > cfs_num_physpages/(2 * bufpages))
+	bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >>
+		   PAGE_CACHE_SHIFT;
+	if (val > num_physpages/(2 * bufpages))
 		return -ERANGE;
 
 	spin_lock(&svc->srv_lock);
@@ -679,7 +680,7 @@ static int ptlrpc_lprocfs_wr_nrs(struct file *file, const char *buffer,
 	 */
 	cmd_copy = cmd;
 
-	if (cfs_copy_from_user(cmd, buffer, count))
+	if (copy_from_user(cmd, buffer, count))
 		GOTO(out, rc = -EFAULT);
 
 	cmd[count] = '\0';
@@ -1235,7 +1236,7 @@ int lprocfs_wr_evict_client(struct file *file, const char *buffer,
 	 * bytes into kbuf, to ensure that the string is NUL-terminated.
 	 * UUID_MAX should include a trailing NUL already.
 	 */
-	if (cfs_copy_from_user(kbuf, buffer,
+	if (copy_from_user(kbuf, buffer,
 			       min_t(unsigned long, BUFLEN - 1, count))) {
 		count = -EFAULT;
 		goto out;
@@ -1310,14 +1311,14 @@ int lprocfs_wr_import(struct file *file, const char *buffer,
 	const char prefix[] = "connection=";
 	const int prefix_len = sizeof(prefix) - 1;
 
-	if (count > CFS_PAGE_SIZE - 1 || count <= prefix_len)
+	if (count > PAGE_CACHE_SIZE - 1 || count <= prefix_len)
 		return -EINVAL;
 
 	OBD_ALLOC(kbuf, count + 1);
 	if (kbuf == NULL)
 		return -ENOMEM;
 
-	if (cfs_copy_from_user(kbuf, buffer, count))
+	if (copy_from_user(kbuf, buffer, count))
 		GOTO(out, count = -EFAULT);
 
 	kbuf[count] = 0;
diff --git a/lustre/ptlrpc/nrs.c b/lustre/ptlrpc/nrs.c
index 5343ab3..43b05e1 100644
--- a/lustre/ptlrpc/nrs.c
+++ b/lustre/ptlrpc/nrs.c
@@ -769,7 +769,7 @@ static int nrs_policy_register(struct ptlrpc_nrs *nrs,
 	LASSERT(desc->pd_compat != NULL);
 
 	OBD_CPT_ALLOC_GFP(policy, svcpt->scp_service->srv_cptable,
-			  svcpt->scp_cpt, sizeof(*policy), CFS_ALLOC_IO);
+			  svcpt->scp_cpt, sizeof(*policy), __GFP_IO);
 	if (policy == NULL)
 		RETURN(-ENOMEM);
 
diff --git a/lustre/ptlrpc/nrs_crr.c b/lustre/ptlrpc/nrs_crr.c
index 6545913..27959cf 100644
--- a/lustre/ptlrpc/nrs_crr.c
+++ b/lustre/ptlrpc/nrs_crr.c
@@ -352,8 +352,8 @@ int nrs_crrn_res_get(struct ptlrpc_nrs_policy *policy,
 		goto out;
 
 	OBD_CPT_ALLOC_GFP(cli, nrs_pol2cptab(policy), nrs_pol2cptid(policy),
-			  sizeof(*cli), moving_req ? CFS_ALLOC_ATOMIC :
-			  CFS_ALLOC_IO);
+			  sizeof(*cli), moving_req ? GFP_ATOMIC :
+			  __GFP_IO);
 	if (cli == NULL)
 		return -ENOMEM;
 
@@ -725,7 +725,7 @@ static int ptlrpc_lprocfs_wr_nrs_crrn_quantum(struct file *file,
 	if (count > (sizeof(kernbuf) - 1))
 		return -EINVAL;
 
-	if (cfs_copy_from_user(kernbuf, buffer, count))
+	if (copy_from_user(kernbuf, buffer, count))
 		return -EFAULT;
 
 	kernbuf[count] = '\0';
diff --git a/lustre/ptlrpc/nrs_orr.c b/lustre/ptlrpc/nrs_orr.c
index 5363629..a5afbc0 100644
--- a/lustre/ptlrpc/nrs_orr.c
+++ b/lustre/ptlrpc/nrs_orr.c
@@ -652,9 +652,9 @@ static int nrs_orr_start(struct ptlrpc_nrs_policy *policy)
 	/**
 	 * Slab cache for NRS ORR/TRR objects.
 	 */
-	orrd->od_cache = cfs_mem_cache_create(orrd->od_objname,
-					      sizeof(struct nrs_orr_object),
-					      0, 0);
+	orrd->od_cache = kmem_cache_create(orrd->od_objname,
+					   sizeof(struct nrs_orr_object),
+					   0, 0, NULL);
 	if (orrd->od_cache == NULL)
 		GOTO(failed, rc = -ENOMEM);
 
@@ -702,7 +702,6 @@ static int nrs_orr_start(struct ptlrpc_nrs_policy *policy)
 
 failed:
 	if (orrd->od_cache) {
-		rc = cfs_mem_cache_destroy(orrd->od_cache);
-		LASSERTF(rc == 0, "Could not destroy od_cache slab\n");
+		kmem_cache_destroy(orrd->od_cache);
 	}
 	if (orrd->od_binheap != NULL)
@@ -735,7 +735,7 @@ static void nrs_orr_stop(struct ptlrpc_nrs_policy *policy)
 
 	cfs_binheap_destroy(orrd->od_binheap);
 	cfs_hash_putref(orrd->od_obj_hash);
-	cfs_mem_cache_destroy(orrd->od_cache);
+	kmem_cache_destroy(orrd->od_cache);
 
 	OBD_FREE_PTR(orrd);
 }
@@ -882,8 +882,8 @@ int nrs_orr_res_get(struct ptlrpc_nrs_policy *policy,
 
 	OBD_SLAB_CPT_ALLOC_PTR_GFP(orro, orrd->od_cache,
 				   nrs_pol2cptab(policy), nrs_pol2cptid(policy),
-				   (moving_req ? CFS_ALLOC_ATOMIC :
-				    CFS_ALLOC_IO));
+				   (moving_req ? GFP_ATOMIC :
+				    __GFP_IO));
 	if (orro == NULL)
 		RETURN(-ENOMEM);
 
@@ -1312,7 +1312,7 @@ static int ptlrpc_lprocfs_wr_nrs_orr_quantum(struct file *file,
 	if (count > (sizeof(kernbuf) - 1))
 		return -EINVAL;
 
-	if (cfs_copy_from_user(kernbuf, buffer, count))
+	if (copy_from_user(kernbuf, buffer, count))
 		return -EFAULT;
 
 	kernbuf[count] = '\0';
@@ -1531,7 +1531,7 @@ static int ptlrpc_lprocfs_wr_nrs_orr_offset_type(struct file *file,
 	if (count > (sizeof(kernbuf) - 1))
 		return -EINVAL;
 
-	if (cfs_copy_from_user(kernbuf, buffer, count))
+	if (copy_from_user(kernbuf, buffer, count))
 		return -EFAULT;
 
 	kernbuf[count] = '\0';
@@ -1793,7 +1793,7 @@ static int ptlrpc_lprocfs_wr_nrs_orr_supported(struct file *file,
 	if (count > (sizeof(kernbuf) - 1))
 		return -EINVAL;
 
-	if (cfs_copy_from_user(kernbuf, buffer, count))
+	if (copy_from_user(kernbuf, buffer, count))
 		return -EFAULT;
 
 	kernbuf[count] = '\0';
diff --git a/lustre/ptlrpc/pers.c b/lustre/ptlrpc/pers.c
index 2001477..97b7bf4 100644
--- a/lustre/ptlrpc/pers.c
+++ b/lustre/ptlrpc/pers.c
@@ -68,7 +68,7 @@ void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
 	md->start = &desc->bd_iov[mdidx * LNET_MAX_IOV];
 }
 
-void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
+void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page,
 			  int pageoffset, int len)
 {
 	lnet_kiov_t *kiov = &desc->bd_iov[desc->bd_iov_count];
@@ -114,7 +114,7 @@ static int can_merge_iovs(lnet_md_iovec_t *existing, lnet_md_iovec_t *candidate)
 	return 0;
 }
 
-void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
+void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page,
 			  int pageoffset, int len)
 {
 	lnet_md_iovec_t *iov = &desc->bd_iov[desc->bd_iov_count];
diff --git a/lustre/ptlrpc/ptlrpc_internal.h b/lustre/ptlrpc/ptlrpc_internal.h
index fa8454c..2172c82 100644
--- a/lustre/ptlrpc/ptlrpc_internal.h
+++ b/lustre/ptlrpc/ptlrpc_internal.h
@@ -232,7 +232,7 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink);
 /* pers.c */
 void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc,
 			 int mdcnt);
-void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, cfs_page_t *page,
+void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page,
 			  int pageoffset, int len);
 
 /* pack_generic.c */
diff --git a/lustre/ptlrpc/recover.c b/lustre/ptlrpc/recover.c
index 9b8e8f8c..1dd4533 100644
--- a/lustre/ptlrpc/recover.c
+++ b/lustre/ptlrpc/recover.c
@@ -172,7 +172,7 @@ int ptlrpc_resend(struct obd_import *imp)
 
 	cfs_list_for_each_entry_safe(req, next, &imp->imp_sending_list,
 				     rq_list) {
-		LASSERTF((long)req > CFS_PAGE_SIZE && req != LP_POISON,
+		LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON,
			 "req %p bad\n", req);
		LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
		if (!ptlrpc_no_resend(req))
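The nrs_orr.c hunks above move to the kernel slab prototypes: kmem_cache_create() takes a fifth constructor argument, and kmem_cache_destroy() returns void, which is why the failure path can no longer assert on a destroy return code. A call-shape sketch with a userspace mock standing in for the kernel API (all mock_* names and the cache name are hypothetical):

	#include <stdio.h>
	#include <stddef.h>

	struct mock_kmem_cache {
		const char *name;
		size_t size;
	};

	static struct mock_kmem_cache *
	mock_kmem_cache_create(const char *name, size_t size, size_t align,
			       unsigned long flags, void (*ctor)(void *))
	{
		static struct mock_kmem_cache cache; /* one cache suffices */

		(void)align; (void)flags; (void)ctor;
		cache.name = name;
		cache.size = size;
		return &cache;
	}

	/* like the kernel's: returns void, nothing for callers to check */
	static void mock_kmem_cache_destroy(struct mock_kmem_cache *c)
	{
		printf("destroyed cache %s\n", c->name);
	}

	struct nrs_orr_object { long key; };

	int main(void)
	{
		struct mock_kmem_cache *od_cache;

		/* new-style call: align 0, flags 0, no constructor */
		od_cache = mock_kmem_cache_create("nrs_orr_example",
						  sizeof(struct nrs_orr_object),
						  0, 0, NULL);
		if (od_cache == NULL)
			return 1;
		mock_kmem_cache_destroy(od_cache);
		return 0;
	}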
diff --git a/lustre/ptlrpc/sec_bulk.c b/lustre/ptlrpc/sec_bulk.c
index c6fe4ce..ea7a3dd 100644
--- a/lustre/ptlrpc/sec_bulk.c
+++ b/lustre/ptlrpc/sec_bulk.c
@@ -65,7 +65,7 @@
 
 #ifdef __KERNEL__
 
-#define PTRS_PER_PAGE   (CFS_PAGE_SIZE / sizeof(void *))
+#define PTRS_PER_PAGE   (PAGE_CACHE_SIZE / sizeof(void *))
 #define PAGES_PER_POOL  (PTRS_PER_PAGE)
 
 #define IDLE_IDX_MAX            (100)
@@ -120,17 +120,17 @@ static struct ptlrpc_enc_page_pool {
 	unsigned long    epp_st_lowfree;       /* lowest free pages reached */
 	unsigned int     epp_st_max_wqlen;     /* highest waitqueue length */
 	cfs_time_t       epp_st_max_wait;      /* in jeffies */
-        /*
-         * pointers to pools
-         */
-        cfs_page_t    ***epp_pools;
+	/*
+	 * pointers to pools
+	 */
+	struct page    ***epp_pools;
 } page_pools;
 
 /*
 * memory shrinker
 */
-const int pools_shrinker_seeks = CFS_DEFAULT_SEEKS;
-static struct cfs_shrinker *pools_shrinker = NULL;
+const int pools_shrinker_seeks = DEFAULT_SEEKS;
+static struct shrinker *pools_shrinker;
 
 /*
@@ -163,7 +163,7 @@ int sptlrpc_proc_read_enc_pool(char *page, char **start, off_t off, int count,
 		      "max waitqueue depth:     %u\n"
 		      "max wait time:           "CFS_TIME_T"/%u\n"
 		      ,
-		      cfs_num_physpages,
+		      num_physpages,
 		      PAGES_PER_POOL,
 		      page_pools.epp_max_pages,
 		      page_pools.epp_max_pools,
@@ -214,7 +214,7 @@ static void enc_pools_release_free_pages(long npages)
 		LASSERT(page_pools.epp_pools[p_idx]);
 		LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
 
-		cfs_free_page(page_pools.epp_pools[p_idx][g_idx]);
+		__free_page(page_pools.epp_pools[p_idx][g_idx]);
 		page_pools.epp_pools[p_idx][g_idx] = NULL;
 
 		if (++g_idx == PAGES_PER_POOL) {
@@ -226,7 +226,7 @@ static void enc_pools_release_free_pages(long npages)
 	/* free unused pools */
 	while (p_idx_max1 < p_idx_max2) {
 		LASSERT(page_pools.epp_pools[p_idx_max2]);
-		OBD_FREE(page_pools.epp_pools[p_idx_max2], CFS_PAGE_SIZE);
+		OBD_FREE(page_pools.epp_pools[p_idx_max2], PAGE_CACHE_SIZE);
 		page_pools.epp_pools[p_idx_max2] = NULL;
 		p_idx_max2--;
 	}
@@ -282,25 +282,25 @@ int npages_to_npools(unsigned long npages)
 /*
 * return how many pages cleaned up.
 */
-static unsigned long enc_pools_cleanup(cfs_page_t ***pools, int npools)
+static unsigned long enc_pools_cleanup(struct page ***pools, int npools)
 {
-        unsigned long cleaned = 0;
-        int           i, j;
-
-        for (i = 0; i < npools; i++) {
-                if (pools[i]) {
-                        for (j = 0; j < PAGES_PER_POOL; j++) {
-                                if (pools[i][j]) {
-                                        cfs_free_page(pools[i][j]);
-                                        cleaned++;
-                                }
-                        }
-                        OBD_FREE(pools[i], CFS_PAGE_SIZE);
-                        pools[i] = NULL;
-                }
-        }
+	unsigned long cleaned = 0;
+	int           i, j;
+
+	for (i = 0; i < npools; i++) {
+		if (pools[i]) {
+			for (j = 0; j < PAGES_PER_POOL; j++) {
+				if (pools[i][j]) {
+					__free_page(pools[i][j]);
+					cleaned++;
+				}
+			}
+			OBD_FREE(pools[i], PAGE_CACHE_SIZE);
+			pools[i] = NULL;
+		}
+	}
 
-        return cleaned;
+	return cleaned;
 }
 
 /*
@@ -310,7 +310,7 @@ static unsigned long enc_pools_cleanup(cfs_page_t ***pools, int npools)
 * we have options to avoid most memory copy with some tricks. but we choose
 * the simplest way to avoid complexity. It's not frequently called.
 */
-static void enc_pools_insert(cfs_page_t ***pools, int npools, int npages)
+static void enc_pools_insert(struct page ***pools, int npools, int npages)
 {
 	int	freeslot;
 	int	op_idx, np_idx, og_idx, ng_idx;
@@ -394,7 +394,7 @@ static void enc_pools_insert(cfs_page_t ***pools, int npools, int npages)
 static int enc_pools_add_pages(int npages)
 {
 	static DEFINE_MUTEX(add_pages_mutex);
-	cfs_page_t   ***pools;
+	struct page   ***pools;
 	int		npools, alloced = 0;
 	int		i, j, rc = -ENOMEM;
 
@@ -414,21 +414,21 @@ static int enc_pools_add_pages(int npages)
 	if (pools == NULL)
 		goto out;
 
-        for (i = 0; i < npools; i++) {
-                OBD_ALLOC(pools[i], CFS_PAGE_SIZE);
-                if (pools[i] == NULL)
-                        goto out_pools;
+	for (i = 0; i < npools; i++) {
+		OBD_ALLOC(pools[i], PAGE_CACHE_SIZE);
+		if (pools[i] == NULL)
+			goto out_pools;
 
-                for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
-                        pools[i][j] = cfs_alloc_page(CFS_ALLOC_IO |
-                                                     CFS_ALLOC_HIGHMEM);
-                        if (pools[i][j] == NULL)
-                                goto out_pools;
+		for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
+			pools[i][j] = alloc_page(__GFP_IO |
+						 __GFP_HIGHMEM);
+			if (pools[i][j] == NULL)
+				goto out_pools;
 
-                        alloced++;
-                }
-        }
-        LASSERT(alloced == npages);
+			alloced++;
+		}
+	}
+	LASSERT(alloced == npages);
 
 	enc_pools_insert(pools, npools, npages);
 	CDEBUG(D_SEC, "added %d pages into pools\n", npages);
@@ -712,7 +712,7 @@ int sptlrpc_enc_pool_init(void)
 	 * maximum capacity is 1/8 of total physical memory.
 	 * is the 1/8 a good number?
 	 */
-	page_pools.epp_max_pages = cfs_num_physpages / 8;
+	page_pools.epp_max_pages = num_physpages / 8;
 	page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
 
 	cfs_waitq_init(&page_pools.epp_waitq);
@@ -743,7 +743,7 @@ int sptlrpc_enc_pool_init(void)
 	if (page_pools.epp_pools == NULL)
 		return -ENOMEM;
 
-	pools_shrinker = cfs_set_shrinker(pools_shrinker_seeks,
+	pools_shrinker = set_shrinker(pools_shrinker_seeks,
 					  enc_pools_shrink);
 	if (pools_shrinker == NULL) {
 		enc_pools_free();
@@ -761,7 +761,7 @@ void sptlrpc_enc_pool_fini(void)
 	LASSERT(page_pools.epp_pools);
 	LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);
 
-	cfs_remove_shrinker(pools_shrinker);
+	remove_shrinker(pools_shrinker);
 
 	npools = npages_to_npools(page_pools.epp_total_pages);
 	cleaned = enc_pools_cleanup(page_pools.epp_pools, npools);
diff --git a/lustre/ptlrpc/sec_plain.c b/lustre/ptlrpc/sec_plain.c
index 8c1ef9c..ec9ee7b 100644
--- a/lustre/ptlrpc/sec_plain.c
+++ b/lustre/ptlrpc/sec_plain.c
@@ -158,19 +158,19 @@ static int plain_verify_bulk_csum(struct ptlrpc_bulk_desc *desc,
 #ifdef __KERNEL__
 static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
 {
-        char           *ptr;
-        unsigned int    off, i;
-
-        for (i = 0; i < desc->bd_iov_count; i++) {
-                if (desc->bd_iov[i].kiov_len == 0)
-                        continue;
-
-                ptr = cfs_kmap(desc->bd_iov[i].kiov_page);
-                off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
-                ptr[off] ^= 0x1;
-                cfs_kunmap(desc->bd_iov[i].kiov_page);
-                return;
-        }
+	char           *ptr;
+	unsigned int    off, i;
+
+	for (i = 0; i < desc->bd_iov_count; i++) {
+		if (desc->bd_iov[i].kiov_len == 0)
+			continue;
+
+		ptr = kmap(desc->bd_iov[i].kiov_page);
+		off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
+		ptr[off] ^= 0x1;
+		kunmap(desc->bd_iov[i].kiov_page);
+		return;
+	}
 }
 #else
 static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
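The pool code above addresses pages through a two-level array, epp_pools[p_idx][g_idx], where each first-level slot is itself a single page holding PAGES_PER_POOL page pointers, so a flat page index splits into a pool index and an in-pool index. A small sketch of that index arithmetic (sizes illustrative; FAKE_* names are made up):

	#include <stdio.h>

	#define FAKE_PAGE_SIZE		4096UL
	#define PTRS_PER_PAGE		(FAKE_PAGE_SIZE / sizeof(void *))
	#define PAGES_PER_POOL		(PTRS_PER_PAGE)	/* 512 w/ 8-byte ptrs */

	int main(void)
	{
		unsigned long idx = 1000;	/* a flat index into the pools */
		unsigned long p_idx = idx / PAGES_PER_POOL; /* pool page */
		unsigned long g_idx = idx % PAGES_PER_POOL; /* slot inside */

		/* page 1000 lives at epp_pools[1][488] with 512-slot pools */
		printf("epp_pools[%lu][%lu]\n", p_idx, g_idx);
		return 0;
	}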
diff --git a/lustre/quota/lquota_entry.c b/lustre/quota/lquota_entry.c
index f1d8117..cff6b6d 100644
--- a/lustre/quota/lquota_entry.c
+++ b/lustre/quota/lquota_entry.c
@@ -326,7 +326,7 @@ struct lquota_entry *lqe_locate(const struct lu_env *env,
 		RETURN(lqe);
 	}
 
-	OBD_SLAB_ALLOC_PTR_GFP(new, lqe_kmem, CFS_ALLOC_IO);
+	OBD_SLAB_ALLOC_PTR_GFP(new, lqe_kmem, __GFP_IO);
 	if (new == NULL) {
 		CERROR("Fail to allocate lqe for id:"LPU64", "
 		       "hash:%s\n", qid->qid_uid, site->lqs_hash->hs_name);
diff --git a/lustre/quota/lquota_internal.h b/lustre/quota/lquota_internal.h
index 1e53054..f16a5e2 100644
--- a/lustre/quota/lquota_internal.h
+++ b/lustre/quota/lquota_internal.h
@@ -364,7 +364,7 @@ struct dt_object *acct_obj_lookup(const struct lu_env *, struct dt_device *,
 void lquota_generate_fid(struct lu_fid *, int, int, int);
 int lquota_extract_fid(const struct lu_fid *, int *, int *, int *);
 const struct dt_index_features *glb_idx_feature(struct lu_fid *);
-extern cfs_mem_cache_t *lqe_kmem;
+extern struct kmem_cache *lqe_kmem;
 
 /* lquota_entry.c */
 /* site create/destroy */
diff --git a/lustre/quota/lquota_lib.c b/lustre/quota/lquota_lib.c
index f7d2bc4..c65465c 100644
--- a/lustre/quota/lquota_lib.c
+++ b/lustre/quota/lquota_lib.c
@@ -40,7 +40,7 @@
 
 #include "lquota_internal.h"
 
-cfs_mem_cache_t *lqe_kmem;
+struct kmem_cache *lqe_kmem;
 
 struct lu_kmem_descr lquota_caches[] = {
 	{
diff --git a/lustre/quota/qsd_internal.h b/lustre/quota/qsd_internal.h
index 4f8e9fc..a518f4a 100644
--- a/lustre/quota/qsd_internal.h
+++ b/lustre/quota/qsd_internal.h
@@ -362,7 +362,7 @@ int qsd_intent_lock(const struct lu_env *, struct obd_export *,
 		    struct quota_body *, bool, int, qsd_req_completion_t,
 		    struct qsd_qtype_info *, struct lquota_lvb *, void *);
 int qsd_fetch_index(const struct lu_env *, struct obd_export *,
-		    struct idx_info *, unsigned int, cfs_page_t **, bool *);
+		    struct idx_info *, unsigned int, struct page **, bool *);
 
 /* qsd_writeback.c */
 void qsd_bump_version(struct qsd_qtype_info *, __u64, bool);
diff --git a/lustre/quota/qsd_lib.c b/lustre/quota/qsd_lib.c
index 7623aa7..dea0cbd 100644
--- a/lustre/quota/qsd_lib.c
+++ b/lustre/quota/qsd_lib.c
@@ -58,7 +58,7 @@
 #include
 #include "qsd_internal.h"
 
-cfs_mem_cache_t *upd_kmem;
+struct kmem_cache *upd_kmem;
 
 struct lu_kmem_descr qsd_caches[] = {
 	{
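The qsd_reint.c change below sizes a 1MB bulk in PAGE_CACHE_SIZE units and converts the master's LU_PAGE counts back to cache pages by shifting with PAGE_CACHE_SHIFT - LU_PAGE_SHIFT. A worked example with illustrative constants (the FAKE_* names and the 200-page reply are made up; the shift only matters on platforms whose page exceeds the 4KB LU_PAGE):

	#include <stdio.h>

	#define FAKE_PAGE_CACHE_SHIFT	12	/* 4KB pages assumed here */
	#define FAKE_PAGE_CACHE_SIZE	(1UL << FAKE_PAGE_CACHE_SHIFT)
	#define LU_PAGE_SHIFT		12	/* LU_PAGE is always 4KB */
	#define LU_PAGE_COUNT	(1 << (FAKE_PAGE_CACHE_SHIFT - LU_PAGE_SHIFT))

	int main(void)
	{
		unsigned int npages = 1 << 20;	/* 1MB bulk, in bytes */
		unsigned int ii_count = 200;	/* LU_PAGEs returned */
		unsigned int pg_cnt;

		npages /= FAKE_PAGE_CACHE_SIZE;	/* 256 pages per 1MB bulk */

		/* round LU_PAGE count up to whole cache pages, then convert */
		pg_cnt = ii_count + LU_PAGE_COUNT - 1;
		pg_cnt >>= FAKE_PAGE_CACHE_SHIFT - LU_PAGE_SHIFT;

		printf("bulk: %u pages, reply occupies %u pages\n",
		       npages, pg_cnt);
		return 0;
	}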
diff --git a/lustre/quota/qsd_reint.c b/lustre/quota/qsd_reint.c
index 6fec9e6..15fc40f 100644
--- a/lustre/quota/qsd_reint.c
+++ b/lustre/quota/qsd_reint.c
@@ -98,7 +98,7 @@ out:
 static int qsd_reint_entries(const struct lu_env *env,
 			     struct qsd_qtype_info *qqi,
 			     struct idx_info *ii, bool global,
-			     cfs_page_t **pages,
+			     struct page **pages,
 			     unsigned int npages, bool need_swab)
 {
 	struct qsd_thread_info	*qti = qsd_info(env);
@@ -123,7 +123,7 @@ static int qsd_reint_entries(const struct lu_env *env,
 	size = ii->ii_recsize + ii->ii_keysize;
 
 	for (i = 0; i < npages; i++) {
-		union lu_page	*lip = cfs_kmap(pages[i]);
+		union lu_page	*lip = kmap(pages[i]);
 
 		for (j = 0; j < LU_PAGE_COUNT; j++) {
 			if (need_swab)
@@ -173,7 +173,7 @@ static int qsd_reint_entries(const struct lu_env *env,
 			lip++;
 		}
 out:
-		cfs_kunmap(pages[i]);
+		kunmap(pages[i]);
 		if (rc)
 			break;
 	}
@@ -187,7 +187,7 @@ static int qsd_reint_index(const struct lu_env *env, struct qsd_qtype_info *qqi,
 	struct qsd_instance	*qsd = qqi->qqi_qsd;
 	struct idx_info		*ii = &qti->qti_ii;
 	struct lu_fid		*fid;
-	cfs_page_t		**pages = NULL;
+	struct page		**pages = NULL;
 	unsigned int		 npages, pg_cnt;
 	__u64			 start_hash = 0, ver = 0;
 	bool			 need_swab = false;
@@ -198,14 +198,14 @@ static int qsd_reint_index(const struct lu_env *env, struct qsd_qtype_info *qqi,
 
 	/* let's do a 1MB bulk */
 	npages = min_t(unsigned int, OFD_MAX_BRW_SIZE, 1 << 20);
-	npages /= CFS_PAGE_SIZE;
+	npages /= PAGE_CACHE_SIZE;
 
 	/* allocate pages for bulk index read */
 	OBD_ALLOC(pages, npages * sizeof(*pages));
 	if (pages == NULL)
 		GOTO(out, rc = -ENOMEM);
 	for (i = 0; i < npages; i++) {
-		pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
+		pages[i] = alloc_page(GFP_IOFS);
 		if (pages[i] == NULL)
 			GOTO(out, rc = -ENOMEM);
 	}
@@ -258,7 +258,7 @@ repeat:
 	ver = ii->ii_version;
 	pg_cnt = (ii->ii_count + (LU_PAGE_COUNT) - 1);
-	pg_cnt >>= CFS_PAGE_SHIFT - LU_PAGE_SHIFT;
+	pg_cnt >>= PAGE_CACHE_SHIFT - LU_PAGE_SHIFT;
 
 	if (pg_cnt > npages) {
 		CERROR("%s: master returned more pages than expected, %u > %u"
@@ -278,7 +278,7 @@ out:
 	if (pages != NULL) {
 		for (i = 0; i < npages; i++)
 			if (pages[i] != NULL)
-				cfs_free_page(pages[i]);
+				__free_page(pages[i]);
 		OBD_FREE(pages, npages * sizeof(*pages));
 	}
 
diff --git a/lustre/quota/qsd_request.c b/lustre/quota/qsd_request.c
index df128b3..2db0433 100644
--- a/lustre/quota/qsd_request.c
+++ b/lustre/quota/qsd_request.c
@@ -352,7 +352,7 @@ out:
 */
 int qsd_fetch_index(const struct lu_env *env, struct obd_export *exp,
 		    struct idx_info *ii, unsigned int npages,
-		    cfs_page_t **pages, bool *need_swab)
+		    struct page **pages, bool *need_swab)
 {
 	struct ptlrpc_request	*req;
 	struct idx_info		*req_ii;
@@ -385,7 +385,7 @@ int qsd_fetch_index(const struct lu_env *env, struct obd_export *exp,
 
 	/* req now owns desc and will free it when it gets freed */
 	for (i = 0; i < npages; i++)
-		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, CFS_PAGE_SIZE);
+		ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
 
 	/* pack index information in request */
 	req_ii = req_capsule_client_get(&req->rq_pill, &RMF_IDX_INFO);
diff --git a/lustre/quota/qsd_writeback.c b/lustre/quota/qsd_writeback.c
index f9af987..828ff43 100644
--- a/lustre/quota/qsd_writeback.c
+++ b/lustre/quota/qsd_writeback.c
@@ -36,7 +36,7 @@
 
 #include "qsd_internal.h"
 
-extern cfs_mem_cache_t *upd_kmem;
+extern struct kmem_cache *upd_kmem;
 
 /*
 * Allocate and fill an qsd_upd_rec structure to be processed by the writeback
@@ -58,7 +58,7 @@ static struct qsd_upd_rec *qsd_upd_alloc(struct qsd_qtype_info *qqi,
 {
 	struct qsd_upd_rec	*upd;
 
-	OBD_SLAB_ALLOC_PTR_GFP(upd, upd_kmem, CFS_ALLOC_IO);
+	OBD_SLAB_ALLOC_PTR_GFP(upd, upd_kmem, __GFP_IO);
 	if (upd == NULL) {
 		CERROR("Failed to allocate upd");
 		return NULL;
diff --git a/lustre/tests/checkfiemap.c b/lustre/tests/checkfiemap.c
index 074f145..b28beb6 100644
--- a/lustre/tests/checkfiemap.c
+++ b/lustre/tests/checkfiemap.c
@@ -40,6 +40,7 @@
 #include
 
 #ifndef HAVE_FIEMAP
+# include
 # include
 #endif
 
diff --git a/lustre/utils/lustre_cfg.c b/lustre/utils/lustre_cfg.c
index 9988dab..5b4c0ec 100644
--- a/lustre/utils/lustre_cfg.c
+++ b/lustre/utils/lustre_cfg.c
@@ -830,11 +830,11 @@ static int getparam_display(struct param_opts *popt, char *pattern)
 		return -ESRCH;
 	}
 
-        buf = malloc(CFS_PAGE_SIZE);
-        for (i = 0; i < glob_info.gl_pathc; i++) {
-                char *valuename = NULL;
+	buf = malloc(PAGE_CACHE_SIZE);
+	for (i = 0; i < glob_info.gl_pathc; i++) {
+		char *valuename = NULL;
 
-                memset(buf, 0, CFS_PAGE_SIZE);
+		memset(buf, 0, PAGE_CACHE_SIZE);
 		/* As listparam_display is used to show param name (with type),
 		 * here "if (only_path)" is ignored.*/
 		if (popt->show_path) {
@@ -851,10 +851,10 @@ static int getparam_display(struct param_opts *popt, char *pattern)
 			continue;
 		}
 
-                do {
-                        rc = read(fd, buf, CFS_PAGE_SIZE);
-                        if (rc == 0)
-                                break;
+		do {
+			rc = read(fd, buf, PAGE_CACHE_SIZE);
+			if (rc == 0)
+				break;
 			if (rc < 0) {
 				fprintf(stderr, "error: get_param: "
 					"read('%s') failed: %s\n",
-- 
1.8.3.1