LU-1346 libcfs: replace cfs_ memory wrappers 31/2831/13
author: Peng Tao <tao.peng@emc.com>
Mon, 17 Jun 2013 09:33:12 +0000 (17:33 +0800)
committer: Oleg Drokin <oleg.drokin@intel.com>
Fri, 28 Jun 2013 19:01:34 +0000 (19:01 +0000)
Replace the memory-related wrappers with the kernel API.

Affected primitives:
CFS_PAGE_SIZE, CFS_PAGE_SHIFT, cfs_num_physpages,
cfs_copy_from_user, cfs_copy_to_user, cfs_page_address,
cfs_kmap/cfs_kunmap, cfs_get_page, cfs_page_count,
cfs_page_index, cfs_page_pin, cfs_page_unpin,
cfs_memory_pressure_get/set/clr, CFS_NUM_CACHEPAGES,
CFS_ALLOC_XXX flags, cfs_alloc/free, cfs_alloc/free_large,
cfs_alloc/free_page, CFS_DECL_MMSPACE, CFS_MMSPACE_OPEN,
CFS_MMSPACE_CLOSE, CFS_SLAB_XXX flags, cfs_shrinker_t,
cfs_set/remove_shrinker, CFS_DEFAULT_SEEKS, cfs_mem_cache_t,
cfs_mem_cache_alloc/free/create/destroy, cfs_mem_is_in_cache
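
As an illustration of the mechanical part of the rename (a hypothetical call
site, not code taken from this patch), a typical allocation changes roughly
like this:

    #include <linux/slab.h>     /* kmalloc/kfree, GFP_* flags */
    #include <linux/pagemap.h>  /* PAGE_CACHE_SIZE */

    static void *lu1346_example_alloc(void)
    {
            /* old: cfs_alloc(CFS_PAGE_SIZE, CFS_ALLOC_ATOMIC | CFS_ALLOC_ZERO); */
            return kmalloc(PAGE_CACHE_SIZE, GFP_ATOMIC | __GFP_ZERO);
    }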

manual changes:
1. cfs_alloc_flags_to_gfp() is removed
2. remove kmalloc/kfree etc. from linux-mem.c and linux-mem.h
3. remove page_address/kmap/kunmap etc. from linux-mem.h
4. remove page_cache_get/page_cache_release from echo_internal.h; they
are already defined in user-mem.h
5. change the kmem_cache_create/destroy prototypes to the kernel's and
modify all callers to match (see the sketch after this list)
6. define _SPL_KMEM_H and related macros to avoid using SPL's
sys/kmem.h, which redefines the slab allocator
7. change kmem_virt() to the kernel-provided is_vmalloc_addr(), so that
we do not use any of SPL's sys/kmem.h functions
8. clean up include files a little bit in osd-zfs
9. various coding style cleanup
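
To make change 5 concrete, here is a hedged sketch of what a caller looks like
after the prototype switch ("my_obj" and the cache name are made up for
illustration, not taken from the patch):

    #include <linux/errno.h>
    #include <linux/slab.h>

    struct my_obj { int value; };
    static struct kmem_cache *my_obj_cache;

    static int my_obj_cache_init(void)
    {
            /* old: my_obj_cache = cfs_mem_cache_create("my_obj",
             *              sizeof(struct my_obj), 0, CFS_SLAB_HWCACHE_ALIGN); */
            my_obj_cache = kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
                                             SLAB_HWCACHE_ALIGN,
                                             NULL /* new ctor argument */);
            return my_obj_cache != NULL ? 0 : -ENOMEM;
    }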

NUMA allocators (cfs_cpt_xxx) are not changed in this patch.
gnilnd is not converted, as requested by James Simmons.

Signed-off-by: Liu Xuezhao <xuezhao.liu@emc.com>
Signed-off-by: Peng Tao <tao.peng@emc.com>
Change-Id: Iadfbb0d5a0e31c78dd6c811e5ffdb468fa7e6f44
Reviewed-on: http://review.whamcloud.com/2831
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
212 files changed:
contrib/scripts/libcfs_cleanup.sed
libcfs/include/libcfs/darwin/darwin-mem.h
libcfs/include/libcfs/libcfs.h
libcfs/include/libcfs/libcfs_crypto.h
libcfs/include/libcfs/libcfs_prim.h
libcfs/include/libcfs/libcfs_private.h
libcfs/include/libcfs/libcfs_string.h
libcfs/include/libcfs/linux/kp30.h
libcfs/include/libcfs/linux/linux-mem.h
libcfs/include/libcfs/posix/libcfs.h
libcfs/include/libcfs/user-mem.h
libcfs/include/libcfs/winnt/portals_utils.h
libcfs/include/libcfs/winnt/winnt-mem.h
libcfs/include/libcfs/winnt/winnt-prim.h
libcfs/include/libcfs/winnt/winnt-tcpip.h
libcfs/libcfs/darwin/darwin-mem.c
libcfs/libcfs/darwin/darwin-tcpip.c
libcfs/libcfs/darwin/darwin-tracefile.c
libcfs/libcfs/debug.c
libcfs/libcfs/heap.c
libcfs/libcfs/kernel_user_comm.c
libcfs/libcfs/libcfs_string.c
libcfs/libcfs/linux/linux-crypto.c
libcfs/libcfs/linux/linux-curproc.c
libcfs/libcfs/linux/linux-mem.c
libcfs/libcfs/linux/linux-tcpip.c
libcfs/libcfs/linux/linux-tracefile.c
libcfs/libcfs/lwt.c
libcfs/libcfs/module.c
libcfs/libcfs/posix/posix-debug.c
libcfs/libcfs/tracefile.c
libcfs/libcfs/tracefile.h
libcfs/libcfs/user-crypto.c
libcfs/libcfs/user-mem.c
libcfs/libcfs/winnt/winnt-curproc.c
libcfs/libcfs/winnt/winnt-fs.c
libcfs/libcfs/winnt/winnt-mem.c
libcfs/libcfs/winnt/winnt-module.c
libcfs/libcfs/winnt/winnt-prim.c
libcfs/libcfs/winnt/winnt-proc.c
libcfs/libcfs/winnt/winnt-tcpip.c
libcfs/libcfs/winnt/winnt-tracefile.c
libcfs/libcfs/winnt/winnt-usr.c
libcfs/libcfs/winnt/winnt-utils.c
lnet/include/lnet/darwin/lib-types.h
lnet/include/lnet/types.h
lnet/klnds/o2iblnd/o2iblnd.c
lnet/klnds/ptllnd/ptllnd.c
lnet/klnds/ptllnd/ptllnd.h
lnet/klnds/ptllnd/ptllnd_rx_buf.c
lnet/klnds/socklnd/socklnd_lib-darwin.c
lnet/klnds/socklnd/socklnd_lib-linux.c
lnet/lnet/api-ni.c
lnet/lnet/lib-md.c
lnet/lnet/lib-move.c
lnet/lnet/router.c
lnet/lnet/router_proc.c
lnet/selftest/brw_test.c
lnet/selftest/conctl.c
lnet/selftest/conrpc.c
lnet/selftest/console.c
lnet/selftest/framework.c
lnet/selftest/rpc.c
lnet/selftest/selftest.h
lnet/utils/lst.c
lustre/fld/fld_cache.c
lustre/include/cl_object.h
lustre/include/lclient.h
lustre/include/liblustre.h
lustre/include/lu_object.h
lustre/include/lustre/lustre_idl.h
lustre/include/lustre_capa.h
lustre/include/lustre_debug.h
lustre/include/lustre_disk.h
lustre/include/lustre_idmap.h
lustre/include/lustre_lib.h
lustre/include/lustre_net.h
lustre/include/obd.h
lustre/include/obd_class.h
lustre/include/obd_support.h
lustre/lclient/lcommon_cl.c
lustre/ldlm/ldlm_extent.c
lustre/ldlm/ldlm_internal.h
lustre/ldlm/ldlm_lib.c
lustre/ldlm/ldlm_lock.c
lustre/ldlm/ldlm_lockd.c
lustre/ldlm/ldlm_pool.c
lustre/ldlm/ldlm_request.c
lustre/ldlm/ldlm_resource.c
lustre/lfsck/lfsck_namespace.c
lustre/liblustre/dir.c
lustre/liblustre/llite_cl.c
lustre/liblustre/llite_lib.h
lustre/liblustre/super.c
lustre/liblustre/tests/sanity.c
lustre/llite/dir.c
lustre/llite/file.c
lustre/llite/llite_internal.h
lustre/llite/llite_lib.c
lustre/llite/llite_mmap.c
lustre/llite/lloop.c
lustre/llite/lproc_llite.c
lustre/llite/remote_perm.c
lustre/llite/rw.c
lustre/llite/rw26.c
lustre/llite/super25.c
lustre/llite/vvp_dev.c
lustre/llite/vvp_internal.h
lustre/llite/vvp_io.c
lustre/llite/vvp_page.c
lustre/lmv/lmv_obd.c
lustre/lmv/lproc_lmv.c
lustre/lod/lod_dev.c
lustre/lod/lod_object.c
lustre/lov/lov_cl_internal.h
lustre/lov/lov_dev.c
lustre/lov/lov_ea.c
lustre/lov/lov_internal.h
lustre/lov/lov_lock.c
lustre/lov/lov_obd.c
lustre/lov/lov_object.c
lustre/lov/lov_pack.c
lustre/lov/lov_page.c
lustre/lov/lovsub_dev.c
lustre/lov/lovsub_lock.c
lustre/lov/lovsub_object.c
lustre/lov/lovsub_page.c
lustre/lvfs/fsfilt_ext3.c
lustre/mdc/mdc_request.c
lustre/mdd/mdd_device.c
lustre/mdd/mdd_dir.c
lustre/mdd/mdd_lproc.c
lustre/mdd/mdd_object.c
lustre/mdt/mdt_handler.c
lustre/mdt/mdt_lproc.c
lustre/mgc/mgc_request.c
lustre/mgs/mgs_handler.c
lustre/mgs/mgs_nids.c
lustre/obdclass/capa.c
lustre/obdclass/cl_lock.c
lustre/obdclass/cl_object.c
lustre/obdclass/cl_page.c
lustre/obdclass/class_obd.c
lustre/obdclass/debug.c
lustre/obdclass/dt_object.c
lustre/obdclass/genops.c
lustre/obdclass/linkea.c
lustre/obdclass/linux/linux-module.c
lustre/obdclass/linux/linux-obdo.c
lustre/obdclass/linux/linux-sysctl.c
lustre/obdclass/lprocfs_status.c
lustre/obdclass/lu_object.c
lustre/obdclass/lu_ref.c
lustre/obdecho/echo.c
lustre/obdecho/echo_client.c
lustre/obdecho/echo_internal.h
lustre/ofd/ofd_dev.c
lustre/ofd/ofd_fmd.c
lustre/ofd/ofd_internal.h
lustre/ofd/ofd_obd.c
lustre/osc/lproc_osc.c
lustre/osc/osc_cache.c
lustre/osc/osc_cl_internal.h
lustre/osc/osc_dev.c
lustre/osc/osc_internal.h
lustre/osc/osc_io.c
lustre/osc/osc_lock.c
lustre/osc/osc_object.c
lustre/osc/osc_page.c
lustre/osc/osc_request.c
lustre/osd-ldiskfs/osd_handler.c
lustre/osd-ldiskfs/osd_internal.h
lustre/osd-ldiskfs/osd_io.c
lustre/osd-ldiskfs/osd_lproc.c
lustre/osd-ldiskfs/osd_quota_fmt.c
lustre/osd-zfs/osd_handler.c
lustre/osd-zfs/osd_internal.h
lustre/osd-zfs/osd_io.c
lustre/osd-zfs/osd_lproc.c
lustre/osd-zfs/osd_object.c
lustre/osd-zfs/osd_quota.c
lustre/osd-zfs/udmu.h
lustre/osp/osp_dev.c
lustre/osp/osp_internal.h
lustre/osp/osp_md_object.c
lustre/ost/ost_handler.c
lustre/ost/ost_internal.h
lustre/ptlrpc/client.c
lustre/ptlrpc/events.c
lustre/ptlrpc/gss/gss_cli_upcall.c
lustre/ptlrpc/gss/gss_krb5_mech.c
lustre/ptlrpc/gss/gss_pipefs.c
lustre/ptlrpc/import.c
lustre/ptlrpc/lproc_ptlrpc.c
lustre/ptlrpc/nrs.c
lustre/ptlrpc/nrs_crr.c
lustre/ptlrpc/nrs_orr.c
lustre/ptlrpc/pers.c
lustre/ptlrpc/ptlrpc_internal.h
lustre/ptlrpc/recover.c
lustre/ptlrpc/sec_bulk.c
lustre/ptlrpc/sec_plain.c
lustre/quota/lquota_entry.c
lustre/quota/lquota_internal.h
lustre/quota/lquota_lib.c
lustre/quota/qsd_internal.h
lustre/quota/qsd_lib.c
lustre/quota/qsd_reint.c
lustre/quota/qsd_request.c
lustre/quota/qsd_writeback.c
lustre/tests/checkfiemap.c
lustre/utils/lustre_cfg.c

diff --git a/contrib/scripts/libcfs_cleanup.sed b/contrib/scripts/libcfs_cleanup.sed
index 61a8253..7aea56f 100644
@@ -256,91 +256,100 @@ s/\bCFS_DTTOIF\b/DTTOIF/g
 
 ################################################################################
 # memory operations
-
-#s/\bcfs_page_t\b/struct page/g
-#s/\bCFS_PAGE_SIZE\b/PAGE_CACHE_SIZE/g
-#/#[ \t]*define[ \t]*\bPAGE_CACHE_SIZE\b[ \t]*\bPAGE_CACHE_SIZE\b/d
-#s/\bCFS_PAGE_SHIFT\b/PAGE_CACHE_SHIFT/g
-#/#[ \t]*define[ \t]*\bPAGE_CACHE_SHIFT\b[ \t]*\bPAGE_CACHE_SHIFT\b/d
-#s/\bCFS_PAGE_MASK\b/PAGE_CACHE_MASK/g
-#/#[ \t]*define[ \t]*\bPAGE_CACHE_MASK\b[ \t]*\bPAGE_CACHE_MASK\b/d
-#s/\bcfs_num_physpages\b/num_physpages/g
-#/#[ \t]*define[ \t]*\bnum_physpages\b[ \t]*\bnum_physpages\b/d
-#s/\bcfs_copy_from_user\b/copy_from_user/g
-#/#[ \t]*define[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)/d
-#s/\bcfs_copy_to_user\b/copy_to_user/g
-#/#[ \t]*define[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)/d
-#s/\bcfs_page_address\b/page_address/g
-#/#[ \t]*define[ \t]*\bpage_address\b *( *\w* *)[ \t]*\bpage_address\b *( *\w* *)/d
-#s/\bcfs_kmap\b/kmap/g
-#/#[ \t]*define[ \t]*\bkmap\b *( *\w* *)[ \t]*\bkmap\b *( *\w* *)/d
-#s/\bcfs_kunmap\b/kunmap/g
-#/#[ \t]*define[ \t]*\bkunmap\b *( *\w* *)[ \t]*\bkunmap\b *( *\w* *)/d
-#s/\bcfs_get_page\b/get_page/g
-#/#[ \t]*define[ \t]*\bget_page\b *( *\w* *)[ \t]*\bget_page\b *( *\w* *)/d
-#s/\bcfs_page_count\b/page_count/g
-#/#[ \t]*define[ \t]*\bpage_count\b *( *\w* *)[ \t]*\bpage_count\b *( *\w* *)/d
-#s/\bcfs_page_index\b/page_index/g
-#/#[ \t]*define[ \t]*\bpage_index\b *( *\w* *)[ \t]*\bpage_index\b *( *\w* *)/d
-#s/\bcfs_page_pin\b/page_cache_get/g
-#/#[ \t]*define[ \t]*\bpage_cache_get\b *( *\w* *)[ \t]*\bpage_cache_get\b *( *\w* *)/d
-#s/\bcfs_page_unpin\b/page_cache_release/g
-#/#[ \t]*define[ \t]*\bpage_cache_release\b *( *\w* *)[ \t]*\bpage_cache_release\b *( *\w* *)/d
-#s/\bcfs_memory_pressure_get\b/memory_pressure_get/g
-#s/\bcfs_memory_pressure_set\b/memory_pressure_set/g
-#s/\bcfs_memory_pressure_clr\b/memory_pressure_clr/g
-#s/\bCFS_NUM_CACHEPAGES\b/NUM_CACHEPAGES/g
-# memory allocator
-#s/\bCFS_ALLOC_ATOMIC\b/GFP_ATOMIC/g
-#/#[ \t]*define[ \t]*\bGFP_ATOMIC\b[ \t]*\bGFP_ATOMIC\b/d
-#s/\bCFS_ALLOC_WAIT\b/__GFP_WAIT/g
-#/#[ \t]*define[ \t]*\b__GFP_WAIT\b[ \t]*\b__GFP_WAIT\b/d
-#s/\bCFS_ALLOC_ZERO\b/__GFP_ZERO/g
-#/#[ \t]*define[ \t]*\b__GFP_ZERO\b[ \t]*\b__GFP_ZERO\b/d
-#s/\bCFS_ALLOC_FS\b/__GFP_FS/g
-#/#[ \t]*define[ \t]*\b__GFP_FS\b[ \t]*\b__GFP_FS\b/d
-#s/\bCFS_ALLOC_IO\b/__GFP_IO/g
-#/#[ \t]*define[ \t]*\b__GFP_IO\b[ \t]*\b__GFP_IO\b/d
-#s/\bCFS_ALLOC_NOWARN\b/__GFP_NOWARN/g
-#/#[ \t]*define[ \t]*\b__GFP_NOWARN\b[ \t]*\b__GFP_NOWARN\b/d
-#s/\bCFS_ALLOC_STD\b/GFP_IOFS/g
-#/#[ \t]*define[ \t]*\bGFP_IOFS\b[ \t]*\bGFP_IOFS\b/d
-#s/\bCFS_ALLOC_USER\b/GFP_KERNEL/g
-#/#[ \t]*define[ \t]*\bGFP_KERNEL\b[ \t]*\bGFP_KERNEL\b/d
-#s/\bCFS_ALLOC_HIGHMEM\b/__GFP_HIGHMEM/g
-#/#[ \t]*define[ \t]*\b__GFP_HIGHMEM\b[ \t]*\b__GFP_HIGHMEM\b/d
-#s/\bCFS_ALLOC_HIGHUSER\b/GFP_HIGHUSER/g
-#/#[ \t]*define[ \t]*\bGFP_HIGHUSER\b[ \t]*\bGFP_HIGHUSER\b/d
-#s/\bCFS_ALLOC_ATOMIC_TRY\b/ALLOC_ATOMIC_TRY/g
-#s/\bcfs_alloc\b/kmalloc/g
-#/#[ \t]*define[ \t]*\bkmalloc\b *( *\w* *, *\w* *)[ \t]*\bkmalloc\b *( *\w* *, *\w* *)/d
-#s/\bcfs_free\b/kfree/g
-#/#[ \t]*define[ \t]*\bkfree\b *( *\w* *)[ \t]*\bkfree\b *( *\w* *)/d
-#s/\bcfs_alloc_large\b/vmalloc/g
-#/#[ \t]*define[ \t]*\bvmalloc\b *( *\w* *)[ \t]*\bvmalloc\b *( *\w* *)/d
-#s/\bcfs_free_large\b/vfree/g
-#/#[ \t]*define[ \t]*\bvfree\b *( *\w* *)[ \t]*\bvfree\b *( *\w* *)/d
-#s/\bcfs_alloc_page\b/alloc_page/g
-#/#[ \t]*define[ \t]*\balloc_page\b *( *\w* *)[ \t]*\balloc_page\b *( *\w* *)/d
-#s/\bcfs_free_page\b/__free_page/g
-#/#[ \t]*define[ \t]*\b__free_page\b *( *\w* *)[ \t]*\b__free_page\b *( *\w* *)/d
+s/\bcfs_page_t\b/struct page/g
+/typedef[ \t]*\bstruct page\b[ \t]*\bstruct page\b/d
+s/\bCFS_PAGE_SIZE\b/PAGE_CACHE_SIZE/g
+/#[ \t]*define[ \t]*\bPAGE_CACHE_SIZE\b[ \t]*\bPAGE_CACHE_SIZE\b/d
+s/\bCFS_PAGE_SHIFT\b/PAGE_CACHE_SHIFT/g
+/#[ \t]*define[ \t]*\bPAGE_CACHE_SHIFT\b[ \t]*\bPAGE_CACHE_SHIFT\b/d
+s/\bcfs_num_physpages\b/num_physpages/g
+/#[ \t]*define[ \t]*\bnum_physpages\b[ \t]*\bnum_physpages\b/d
+s/\bcfs_copy_from_user\b/copy_from_user/g
+/#[ \t]*define[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)/d
+s/\bcfs_copy_to_user\b/copy_to_user/g
+/#[ \t]*define[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)/d
+s/\bcfs_page_address\b/page_address/g
+/#[ \t]*define[ \t]*\bpage_address\b *( *\w* *)[ \t]*\bpage_address\b *( *\w* *)/d
+s/\bcfs_kmap\b/kmap/g
+/#[ \t]*define[ \t]*\bkmap\b *( *\w* *)[ \t]*\bkmap\b *( *\w* *)/d
+s/\bcfs_kunmap\b/kunmap/g
+/#[ \t]*define[ \t]*\bkunmap\b *( *\w* *)[ \t]*\bkunmap\b *( *\w* *)/d
+s/\bcfs_get_page\b/get_page/g
+/#[ \t]*define[ \t]*\bget_page\b *( *\w* *)[ \t]*\bget_page\b *( *\w* *)/d
+s/\bcfs_page_count\b/page_count/g
+/#[ \t]*define[ \t]*\bpage_count\b *( *\w* *)[ \t]*\bpage_count\b *( *\w* *)/d
+s/\bcfs_page_index\b/page_index/g
+/#[ \t]*define[ \t]*\bpage_index\b *( *\w* *)[ \t]*\bpage_index\b *( *\w* *)/d
+s/\bcfs_page_pin\b/page_cache_get/g
+/#[ \t]*define[ \t]*\bpage_cache_get\b *( *\w* *)[ \t]*\bpage_cache_get\b *( *\w* *)/d
+s/\bcfs_page_unpin\b/page_cache_release/g
+/#[ \t]*define[ \t]*\bpage_cache_release\b *( *\w* *)[ \t]*\bpage_cache_release\b *( *\w* *)/d
+s/\bcfs_memory_pressure_get\b/memory_pressure_get/g
+s/\bcfs_memory_pressure_set\b/memory_pressure_set/g
+s/\bcfs_memory_pressure_clr\b/memory_pressure_clr/g
+s/\bCFS_NUM_CACHEPAGES\b/NUM_CACHEPAGES/g
+ # memory allocator
+s/\bCFS_ALLOC_ATOMIC\b/GFP_ATOMIC/g
+/#[ \t]*define[ \t]*\bGFP_ATOMIC\b[ \t]*\bGFP_ATOMIC\b/d
+s/\bCFS_ALLOC_WAIT\b/__GFP_WAIT/g
+/#[ \t]*define[ \t]*\b__GFP_WAIT\b[ \t]*\b__GFP_WAIT\b/d
+s/\bCFS_ALLOC_ZERO\b/__GFP_ZERO/g
+/#[ \t]*define[ \t]*\b__GFP_ZERO\b[ \t]*\b__GFP_ZERO\b/d
+s/\bCFS_ALLOC_FS\b/__GFP_FS/g
+/#[ \t]*define[ \t]*\b__GFP_FS\b[ \t]*\b__GFP_FS\b/d
+s/\bCFS_ALLOC_IO\b/__GFP_IO/g
+/#[ \t]*define[ \t]*\b__GFP_IO\b[ \t]*\b__GFP_IO\b/d
+s/\bCFS_ALLOC_NOWARN\b/__GFP_NOWARN/g
+/#[ \t]*define[ \t]*\b__GFP_NOWARN\b[ \t]*\b__GFP_NOWARN\b/d
+s/\bCFS_ALLOC_STD\b/GFP_IOFS/g
+/#[ \t]*define[ \t]*\bGFP_IOFS\b[ \t]*\bGFP_IOFS\b/d
+s/\bCFS_ALLOC_USER\b/GFP_USER/g
+/#[ \t]*define[ \t]*\bGFP_USER\b[ \t]*\bGFP_USER\b/d
+s/\bCFS_ALLOC_KERNEL\b/GFP_KERNEL/g
+/#[ \t]*define[ \t]*\bGFP_KERNEL\b[ \t]*\bGFP_KERNEL\b/d
+s/\bCFS_ALLOC_NOFS\b/GFP_NOFS/g
+/#[ \t]*define[ \t]*\bGFP_NOFS\b[ \t]*\bGFP_NOFS\b/d
+s/\bCFS_ALLOC_HIGHMEM\b/__GFP_HIGHMEM/g
+/#[ \t]*define[ \t]*\b__GFP_HIGHMEM\b[ \t]*\b__GFP_HIGHMEM\b/d
+s/\bCFS_ALLOC_HIGHUSER\b/GFP_HIGHUSER/g
+/#[ \t]*define[ \t]*\bGFP_HIGHUSER\b[ \t]*\bGFP_HIGHUSER\b/d
+s/\bCFS_ALLOC_ATOMIC_TRY\b/ALLOC_ATOMIC_TRY/g
+s/\bcfs_alloc\b/kmalloc/g
+/#[ \t]*define[ \t]*\bkmalloc\b *( *\w* *, *\w* *)[ \t]*\bkmalloc\b *( *\w* *, *\w* *)/d
+s/\bcfs_free\b/kfree/g
+/#[ \t]*define[ \t]*\bkfree\b *( *\w* *)[ \t]*\bkfree\b *( *\w* *)/d
+s/\bcfs_alloc_large\b/vmalloc/g
+/#[ \t]*define[ \t]*\bvmalloc\b *( *\w* *)[ \t]*\bvmalloc\b *( *\w* *)/d
+s/\bcfs_free_large\b/vfree/g
+/#[ \t]*define[ \t]*\bvfree\b *( *\w* *)[ \t]*\bvfree\b *( *\w* *)/d
+s/\bcfs_alloc_page\b/alloc_page/g
+/#[ \t]*define[ \t]*\balloc_page\b *( *\w* *)[ \t]*\balloc_page\b *( *\w* *)/d
+s/\bcfs_free_page\b/__free_page/g
+/#[ \t]*define[ \t]*\b__free_page\b *( *\w* *)[ \t]*\b__free_page\b *( *\w* *)/d
 # TODO: SLAB allocator
-#s/\bCFS_DECL_MMSPACE\b/DECL_MMSPACE/g
-#s/\bCFS_MMSPACE_OPEN\b/MMSPACE_OPEN/g
-#s/\bCFS_MMSPACE_CLOSE\b/MMSPACE_CLOSE/g
-#s/\bCFS_SLAB_HWCACHE_ALIGN\b/SLAB_HWCACHE_ALIGN/g
-#/#[ \t]*define[ \t]*\bSLAB_HWCACHE_ALIGN\b[ \t]*\bSLAB_HWCACHE_ALIGN\b/d
-#s/\bCFS_SLAB_KERNEL\b/SLAB_KERNEL/g
-#/#[ \t]*define[ \t]*\bSLAB_KERNEL\b[ \t]*\bSLAB_KERNEL\b/d
-#s/\bCFS_SLAB_NOFS\b/SLAB_NOFS/g
-#/#[ \t]*define[ \t]*\bSLAB_NOFS\b[ \t]*\bSLAB_NOFS\b/d
-#s/\bcfs_shrinker\b/shrinker/g
-#/#[ \t]*define[ \t]*\bshrinker\b[ \t]*\bshrinker\b/d
-#s/\bcfs_shrinker_t\b/struct shrinkert/g
-#/typedef[ \t]*\bshrinker_t\b[ \t]*\bshrinker_t\b/d
-#s/\bcfs_set_shrinker\b/set_shrinker/g
-#/#[ \t]*define[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)/d
-#s/\bcfs_remove_shrinker\b/remove_shrinker/g
-#/#[ \t]*define[ \t]*\bremove_shrinker\b *( *\w* *)[ \t]*\bremove_shrinker\b *( *\w* *)/d
-#s/\bCFS_DEFAULT_SEEKS\b/DEFAULT_SEEKS/g
-#/#[ \t]*define[ \t]*\bDEFAULT_SEEKS\b[ \t]*\bDEFAULT_SEEKS\b/d
+s/\bCFS_DECL_MMSPACE\b/DECL_MMSPACE/g
+s/\bCFS_MMSPACE_OPEN\b/MMSPACE_OPEN/g
+s/\bCFS_MMSPACE_CLOSE\b/MMSPACE_CLOSE/g
+s/\bCFS_SLAB_HWCACHE_ALIGN\b/SLAB_HWCACHE_ALIGN/g
+/#[ \t]*define[ \t]*\bSLAB_HWCACHE_ALIGN\b[ \t]*\bSLAB_HWCACHE_ALIGN\b/d
+s/\bCFS_SLAB_KERNEL\b/SLAB_KERNEL/g
+/#[ \t]*define[ \t]*\bSLAB_KERNEL\b[ \t]*\bSLAB_KERNEL\b/d
+s/\bCFS_SLAB_NOFS\b/SLAB_NOFS/g
+/#[ \t]*define[ \t]*\bSLAB_NOFS\b[ \t]*\bSLAB_NOFS\b/d
+s/\bcfs_shrinker\b/shrinker/g
+/#[ \t]*define[ \t]*\bshrinker\b[ \t]*\bshrinker\b/d
+s/\bcfs_shrinker_t\b/shrinker_t/g
+/typedef[ \t]*\bshrinker_t\b[ \t]*\bshrinker_t\b/d
+s/\bcfs_set_shrinker\b/set_shrinker/g
+/#[ \t]*define[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)/d
+s/\bcfs_remove_shrinker\b/remove_shrinker/g
+/#[ \t]*define[ \t]*\bremove_shrinker\b *( *\w* *)[ \t]*\bremove_shrinker\b *( *\w* *)/d
+s/\bCFS_DEFAULT_SEEKS\b/DEFAULT_SEEKS/g
+/#[ \t]*define[ \t]*\bDEFAULT_SEEKS\b[ \t]*\bDEFAULT_SEEKS\b/d
+s/cfs_mem_cache_t/struct kmem_cache/g
+s/cfs_mem_cache_create/kmem_cache_create/g
+s/\w+[ =]*cfs_mem_cache_destroy/kmem_cache_destroy/g
+s/cfs_mem_cache_destroy/kmem_cache_destroy/g
+s/cfs_mem_cache_alloc/kmem_cache_alloc/g
+s/cfs_mem_cache_free/kmem_cache_free/g
+s/cfs_mem_is_in_cache/kmem_is_in_cache/g
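
The two destroy rules above exist because the kernel's kmem_cache_destroy()
returns void while cfs_mem_cache_destroy() returned int; the `\w+[ =]*` variant
also removes a leading return-value assignment. A hedged illustration of the
resulting code (hypothetical call site, not from the patch):

    static void example_cache_fini(struct kmem_cache *cache)
    {
            /* before the script ran: rc = cfs_mem_cache_destroy(cache); */
            kmem_cache_destroy(cache);      /* no return value left to check */
    }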
diff --git a/libcfs/include/libcfs/darwin/darwin-mem.h b/libcfs/include/libcfs/darwin/darwin-mem.h
index ebaf064..326268b 100644
 /* Variable sized pages are not supported */
 
 #ifdef PAGE_SHIFT
-#define CFS_PAGE_SHIFT PAGE_SHIFT
+#define PAGE_CACHE_SHIFT       PAGE_SHIFT
 #else
-#define CFS_PAGE_SHIFT 12
+#define PAGE_CACHE_SHIFT       12
 #endif
 
-#define CFS_PAGE_SIZE  (1UL << CFS_PAGE_SHIFT)
+#define PAGE_CACHE_SIZE        (1UL << PAGE_CACHE_SHIFT)
 
-#define CFS_PAGE_MASK  (~((__u64)CFS_PAGE_SIZE - 1))
+#define CFS_PAGE_MASK  (~((__u64)PAGE_CACHE_SIZE - 1))
 
 enum {
        XNU_PAGE_RAW,
@@ -101,23 +101,23 @@ typedef __u32 page_off_t;
  *    - "xll" pages (XNU_PAGE_XLL): these are used by file system to cache
  *    file data, owned by file system objects, hashed, lrued, etc.
  *
- * cfs_page_t has to cover both of them, because core Lustre code is based on
+ * struct page has to cover both of them, because core Lustre code is based on
  * the Linux assumption that page is _both_ memory buffer and file system
  * caching entity.
  *
  * To achieve this, all types of pages supported on XNU has to start from
- * common header that contains only "page type". Common cfs_page_t operations
+ * common header that contains only "page type". Common struct page operations
  * dispatch through operation vector based on page type.
  *
  */
 typedef struct xnu_page {
        int type;
-} cfs_page_t;
+} struct page;
 
 struct xnu_page_ops {
-       void *(*page_map)        (cfs_page_t *);
-       void  (*page_unmap)      (cfs_page_t *);
-       void *(*page_address)    (cfs_page_t *);
+       void *(*page_map)        (struct page *);
+       void  (*page_unmap)      (struct page *);
+       void *(*page_address)    (struct page *);
 };
 
 void xnu_page_ops_register(int type, struct xnu_page_ops *ops);
@@ -136,44 +136,81 @@ struct xnu_raw_page {
 /*
  * Public interface to lustre
  *
- * - cfs_alloc_page(f)
- * - cfs_free_page(p)
- * - cfs_kmap(p)
- * - cfs_kunmap(p)
- * - cfs_page_address(p)
+ * - alloc_page(f)
+ * - __free_page(p)
+ * - kmap(p)
+ * - kunmap(p)
+ * - page_address(p)
  */
 
 /*
- * Of all functions above only cfs_kmap(), cfs_kunmap(), and
- * cfs_page_address() can be called on file system pages. The rest is for raw
+ * Of all functions above only kmap(), kunmap(), and
+ * page_address() can be called on file system pages. The rest is for raw
  * pages only.
  */
 
-cfs_page_t *cfs_alloc_page(u_int32_t flags);
-void cfs_free_page(cfs_page_t *page);
-void cfs_get_page(cfs_page_t *page);
-int cfs_put_page_testzero(cfs_page_t *page);
-int cfs_page_count(cfs_page_t *page);
-#define cfs_page_index(pg)     (0)
+struct page *alloc_page(u_int32_t flags);
+void __free_page(struct page *page);
+void get_page(struct page *page);
+int cfs_put_page_testzero(struct page *page);
+int page_count(struct page *page);
+#define page_index(pg) (0)
 
-void *cfs_page_address(cfs_page_t *pg);
-void *cfs_kmap(cfs_page_t *pg);
-void cfs_kunmap(cfs_page_t *pg);
+void *page_address(struct page *pg);
+void *kmap(struct page *pg);
+void kunmap(struct page *pg);
 
 /*
  * Memory allocator
  */
 
-void *cfs_alloc(size_t nr_bytes, u_int32_t flags);
-void  cfs_free(void *addr);
+void *kmalloc(size_t nr_bytes, u_int32_t flags);
+void  kfree(void *addr);
 
-void *cfs_alloc_large(size_t nr_bytes);
-void  cfs_free_large(void *addr);
+void *vmalloc(size_t nr_bytes);
+void  vfree(void *addr);
 
 extern int get_preemption_level(void);
 
-#define CFS_ALLOC_ATOMIC_TRY                                    \
-       (get_preemption_level() != 0 ? CFS_ALLOC_ATOMIC : 0)
+/*
+ * Universal memory allocator API
+ */
+enum cfs_alloc_flags {
+       /* allocation is not allowed to block */
+       GFP_ATOMIC = 0x1,
+       /* allocation is allowed to block */
+       __GFP_WAIT   = 0x2,
+       /* allocation should return zeroed memory */
+       __GFP_ZERO   = 0x4,
+       /* allocation is allowed to call file-system code to free/clean
+        * memory */
+       __GFP_FS     = 0x8,
+       /* allocation is allowed to do io to free/clean memory */
+       __GFP_IO     = 0x10,
+       /* don't report allocation failure to the console */
+       __GFP_NOWARN = 0x20,
+       /* standard allocator flag combination */
+       GFP_IOFS    = __GFP_FS | __GFP_IO,
+       GFP_USER   = __GFP_WAIT | __GFP_FS | __GFP_IO,
+       GFP_NOFS   = __GFP_WAIT | __GFP_IO,
+       GFP_KERNEL = __GFP_WAIT | __GFP_IO | __GFP_FS,
+};
+
+/* flags for cfs_page_alloc() in addition to enum cfs_alloc_flags */
+enum cfs_alloc_page_flags {
+       /* allow to return page beyond KVM. It has to be mapped into KVM by
+        * kmap() and unmapped with kunmap(). */
+       __GFP_HIGHMEM  = 0x40,
+       GFP_HIGHUSER = __GFP_WAIT | __GFP_FS | __GFP_IO |
+                            __GFP_HIGHMEM,
+};
+
+#define ALLOC_ATOMIC_TRY                                    \
+       (get_preemption_level() != 0 ? GFP_ATOMIC : 0)
+
+#define memory_pressure_get() (0)
+#define memory_pressure_set() do {} while (0)
+#define memory_pressure_clr() do {} while (0)
 
 /*
  * Slab:
@@ -207,31 +244,32 @@ typedef   zone_t          mem_cache_t;
 
 #define MC_NAME_MAX_LEN                64
 
-typedef struct cfs_mem_cache {
+struct kmem_cache {
        int                     mc_size;
        mem_cache_t             mc_cache;
        struct list_head        mc_link;
        char                    mc_name [MC_NAME_MAX_LEN];
-} cfs_mem_cache_t;
+};
 
 #define KMEM_CACHE_MAX_COUNT   64
 #define KMEM_MAX_ZONE          8192
 
-cfs_mem_cache_t * cfs_mem_cache_create (const char *, size_t, size_t, unsigned long);
-int cfs_mem_cache_destroy ( cfs_mem_cache_t * );
-void *cfs_mem_cache_alloc ( cfs_mem_cache_t *, int);
-void cfs_mem_cache_free ( cfs_mem_cache_t *, void *);
+struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+                                    unsigned long, void *);
+void kmem_cache_destroy(struct kmem_cache *);
+void *kmem_cache_alloc(struct kmem_cache *, int);
+void kmem_cache_free(struct kmem_cache *, void *);
 
 /*
  * Misc
  */
 /* XXX Liang: num_physpages... fix me */
 #define num_physpages                  (64 * 1024)
-#define CFS_NUM_CACHEPAGES             num_physpages
+#define NUM_CACHEPAGES         num_physpages
 
-#define CFS_DECL_MMSPACE               
-#define CFS_MMSPACE_OPEN               do {} while(0)
-#define CFS_MMSPACE_CLOSE              do {} while(0)
+#define DECL_MMSPACE
+#define MMSPACE_OPEN           do {} while (0)
+#define MMSPACE_CLOSE          do {} while (0)
 
 #define copy_from_user(kaddr, uaddr, size)     copyin(CAST_USER_ADDR_T(uaddr), (caddr_t)kaddr, size)
 #define copy_to_user(uaddr, kaddr, size)       copyout((caddr_t)kaddr, CAST_USER_ADDR_T(uaddr), size)
diff --git a/libcfs/include/libcfs/libcfs.h b/libcfs/include/libcfs/libcfs.h
index 34c36a3..c44b8a7 100644
@@ -185,39 +185,6 @@ struct cfs_psdev_ops {
 };
 
 /*
- * Universal memory allocator API
- */
-enum cfs_alloc_flags {
-        /* allocation is not allowed to block */
-        CFS_ALLOC_ATOMIC = 0x1,
-        /* allocation is allowed to block */
-        CFS_ALLOC_WAIT   = 0x2,
-        /* allocation should return zeroed memory */
-        CFS_ALLOC_ZERO   = 0x4,
-        /* allocation is allowed to call file-system code to free/clean
-         * memory */
-        CFS_ALLOC_FS     = 0x8,
-        /* allocation is allowed to do io to free/clean memory */
-        CFS_ALLOC_IO     = 0x10,
-        /* don't report allocation failure to the console */
-        CFS_ALLOC_NOWARN = 0x20,
-        /* standard allocator flag combination */
-        CFS_ALLOC_STD    = CFS_ALLOC_FS | CFS_ALLOC_IO,
-        CFS_ALLOC_USER   = CFS_ALLOC_WAIT | CFS_ALLOC_FS | CFS_ALLOC_IO,
-       CFS_ALLOC_NOFS   = CFS_ALLOC_WAIT | CFS_ALLOC_IO,
-       CFS_ALLOC_KERNEL = CFS_ALLOC_WAIT | CFS_ALLOC_IO | CFS_ALLOC_FS,
-};
-
-/* flags for cfs_page_alloc() in addition to enum cfs_alloc_flags */
-enum cfs_alloc_page_flags {
-       /* allow to return page beyond KVM. It has to be mapped into KVM by
-        * cfs_kmap() and unmapped with cfs_kunmap(). */
-       CFS_ALLOC_HIGHMEM  = 0x40,
-       CFS_ALLOC_HIGHUSER = CFS_ALLOC_WAIT | CFS_ALLOC_FS | CFS_ALLOC_IO |
-                            CFS_ALLOC_HIGHMEM,
-};
-
-/*
  * Drop into debugger, if possible. Implementation is provided by platform.
  */
 
diff --git a/libcfs/include/libcfs/libcfs_crypto.h b/libcfs/include/libcfs/libcfs_crypto.h
index 291191a..64ca62f 100644
@@ -157,7 +157,7 @@ struct cfs_crypto_hash_desc*
  *     @retval 0               for success.
  */
 int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *desc,
-                               cfs_page_t *page, unsigned int offset,
+                               struct page *page, unsigned int offset,
                                unsigned int len);
 
 /**    Update digest by part of data.
diff --git a/libcfs/include/libcfs/libcfs_prim.h b/libcfs/include/libcfs/libcfs_prim.h
index 4a6588e..1f066e9 100644
@@ -90,31 +90,21 @@ cfs_time_t cfs_timer_deadline(cfs_timer_t *t);
 /*
  * Memory
  */
-#ifndef cfs_memory_pressure_get
-#define cfs_memory_pressure_get() (0)
-#endif
-#ifndef cfs_memory_pressure_set
-#define cfs_memory_pressure_set() do {} while (0)
-#endif
-#ifndef cfs_memory_pressure_clr
-#define cfs_memory_pressure_clr() do {} while (0)
-#endif
-
 static inline int cfs_memory_pressure_get_and_set(void)
 {
-        int old = cfs_memory_pressure_get();
+       int old = memory_pressure_get();
 
-        if (!old)
-                cfs_memory_pressure_set();
-        return old;
+       if (!old)
+               memory_pressure_set();
+       return old;
 }
 
 static inline void cfs_memory_pressure_restore(int old)
 {
-        if (old)
-                cfs_memory_pressure_set();
-        else
-                cfs_memory_pressure_clr();
-        return;
+       if (old)
+               memory_pressure_set();
+       else
+               memory_pressure_clr();
+       return;
 }
 #endif
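
For reference, the two inline helpers above are meant to be paired around an
allocation path, roughly as follows (hypothetical caller, not a site touched by
this patch):

    static void example_with_memalloc(void)
    {
            int mpflag = cfs_memory_pressure_get_and_set();

            /* ... allocate while PF_MEMALLOC (memory pressure) is advertised ... */

            cfs_memory_pressure_restore(mpflag);
    }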
diff --git a/libcfs/include/libcfs/libcfs_private.h b/libcfs/include/libcfs/libcfs_private.h
index 921431a..bf75401 100644
@@ -154,14 +154,14 @@ do {                                              \
 #endif /* LIBCFS_DEBUG */
 
 #ifndef LIBCFS_VMALLOC_SIZE
-#define LIBCFS_VMALLOC_SIZE        (2 << CFS_PAGE_SHIFT) /* 2 pages */
+#define LIBCFS_VMALLOC_SIZE        (2 << PAGE_CACHE_SHIFT) /* 2 pages */
 #endif
 
 #define LIBCFS_ALLOC_PRE(size, mask)                                       \
 do {                                                                       \
        LASSERT(!cfs_in_interrupt() ||                                      \
                ((size) <= LIBCFS_VMALLOC_SIZE &&                           \
-                ((mask) & CFS_ALLOC_ATOMIC)) != 0);                        \
+                ((mask) & GFP_ATOMIC)) != 0);                      \
 } while (0)
 
 #define LIBCFS_ALLOC_POST(ptr, size)                                       \
@@ -186,7 +186,7 @@ do {                                                                            \
 do {                                                                       \
        LIBCFS_ALLOC_PRE((size), (mask));                                   \
        (ptr) = (size) <= LIBCFS_VMALLOC_SIZE ?                             \
-               cfs_alloc((size), (mask)) : cfs_alloc_large(size);          \
+               kmalloc((size), (mask)) : vmalloc(size);            \
        LIBCFS_ALLOC_POST((ptr), (size));                                   \
 } while (0)
 
@@ -194,13 +194,13 @@ do {                                                                          \
  * default allocator
  */
 #define LIBCFS_ALLOC(ptr, size) \
-        LIBCFS_ALLOC_GFP(ptr, size, CFS_ALLOC_IO)
+       LIBCFS_ALLOC_GFP(ptr, size, __GFP_IO)
 
 /**
  * non-sleeping allocator
  */
 #define LIBCFS_ALLOC_ATOMIC(ptr, size) \
-        LIBCFS_ALLOC_GFP(ptr, size, CFS_ALLOC_ATOMIC)
+       LIBCFS_ALLOC_GFP(ptr, size, GFP_ATOMIC)
 
 /**
  * allocate memory for specified CPU partition
@@ -218,23 +218,23 @@ do {                                                                          \
 
 /** default numa allocator */
 #define LIBCFS_CPT_ALLOC(ptr, cptab, cpt, size)                                    \
-       LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, CFS_ALLOC_IO)
+       LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, __GFP_IO)
 
-#define LIBCFS_FREE(ptr, size)                                          \
-do {                                                                    \
-        int s = (size);                                                 \
-        if (unlikely((ptr) == NULL)) {                                  \
-                CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at "    \
-                       "%s:%d\n", s, __FILE__, __LINE__);               \
-                break;                                                  \
-        }                                                               \
-        libcfs_kmem_dec((ptr), s);                                      \
-        CDEBUG(D_MALLOC, "kfreed '" #ptr "': %d at %p (tot %d).\n",     \
+#define LIBCFS_FREE(ptr, size)                                         \
+do {                                                                   \
+       int s = (size);                                                 \
+       if (unlikely((ptr) == NULL)) {                                  \
+               CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at "    \
+                      "%s:%d\n", s, __FILE__, __LINE__);               \
+               break;                                                  \
+       }                                                               \
+       libcfs_kmem_dec((ptr), s);                                      \
+       CDEBUG(D_MALLOC, "kfreed '" #ptr "': %d at %p (tot %d).\n",     \
               s, (ptr), libcfs_kmem_read());                           \
-        if (unlikely(s > LIBCFS_VMALLOC_SIZE))                          \
-                cfs_free_large(ptr);                                    \
-        else                                                            \
-                cfs_free(ptr);                                          \
+       if (unlikely(s > LIBCFS_VMALLOC_SIZE))                          \
+               vfree(ptr);                                             \
+       else                                                            \
+               kfree(ptr);                                             \
 } while (0)
 
 /******************************************************************************/
@@ -586,8 +586,8 @@ int         cfs_match_nid(lnet_nid_t nid, cfs_list_t *list);
 
 struct libcfs_device_userstate
 {
-        int           ldu_memhog_pages;
-        cfs_page_t   *ldu_memhog_root_page;
+       int             ldu_memhog_pages;
+       struct page     *ldu_memhog_root_page;
 };
 
 /* what used to be in portals_lib.h */
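
A minimal sketch of how callers use these macros after the rename (the
structure is made up; only the kmalloc()/vmalloc() split shown above is taken
from the patch):

    struct example { char pad[128]; };

    static int example_alloc(void)
    {
            struct example *obj;

            LIBCFS_ALLOC(obj, sizeof(*obj));  /* size <= LIBCFS_VMALLOC_SIZE: kmalloc(..., __GFP_IO) */
            if (obj == NULL)
                    return -ENOMEM;
            /* ... */
            LIBCFS_FREE(obj, sizeof(*obj));   /* a larger size would route to vfree() */
            return 0;
    }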
diff --git a/libcfs/include/libcfs/libcfs_string.h b/libcfs/include/libcfs/libcfs_string.h
index 36961a0..e91396c 100644
@@ -51,7 +51,7 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
                  int *oldmask, int minmask, int allmask);
 
 /* Allocate space for and copy an existing string.
- * Must free with cfs_free().
+ * Must free with kfree().
  */
 char *cfs_strdup(const char *str, u_int32_t flags);
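
A short usage sketch for the updated comment (the GFP_NOFS flag choice is an
assumption, not taken from this diff):

    static int example_dup(const char *name)
    {
            char *copy = cfs_strdup(name, GFP_NOFS);

            if (copy == NULL)
                    return -ENOMEM;
            /* ... use copy ... */
            kfree(copy);    /* as noted above, cfs_strdup() memory is freed with kfree() */
            return 0;
    }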
 
diff --git a/libcfs/include/libcfs/linux/kp30.h b/libcfs/include/libcfs/linux/kp30.h
index 6319efa..de4f27b 100644
@@ -216,7 +216,7 @@ extern lwt_cpu_t lwt_cpus[];
 
 #define LWTSTR(n)       #n
 #define LWTWHERE(f,l)   f ":" LWTSTR(l)
-#define LWT_EVENTS_PER_PAGE (CFS_PAGE_SIZE / sizeof (lwt_event_t))
+#define LWT_EVENTS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lwt_event_t))
 
 #define LWT_EVENT(p1, p2, p3, p4)                                       \
 do {                                                                    \
diff --git a/libcfs/include/libcfs/linux/linux-mem.h b/libcfs/include/libcfs/linux/linux-mem.h
index 1d70be8..0ece47d 100644
 # include <linux/mm_inline.h>
 #endif
 
-typedef struct page                     cfs_page_t;
-#define CFS_PAGE_SIZE                   PAGE_CACHE_SIZE
-#define CFS_PAGE_SHIFT                  PAGE_CACHE_SHIFT
-#define CFS_PAGE_MASK                   (~((__u64)CFS_PAGE_SIZE-1))
+#define CFS_PAGE_MASK                   (~((__u64)PAGE_CACHE_SIZE-1))
 
-#define cfs_num_physpages               num_physpages
+#define page_index(p)       ((p)->index)
 
-#define cfs_copy_from_user(to, from, n) copy_from_user(to, from, n)
-#define cfs_copy_to_user(to, from, n)   copy_to_user(to, from, n)
 
-static inline void *cfs_page_address(cfs_page_t *page)
-{
-        /*
-         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
-         * from here: this will lead to infinite recursion.
-         */
-        return page_address(page);
-}
-
-static inline void *cfs_kmap(cfs_page_t *page)
-{
-        return kmap(page);
-}
-
-static inline void cfs_kunmap(cfs_page_t *page)
-{
-        kunmap(page);
-}
-
-static inline void cfs_get_page(cfs_page_t *page)
-{
-        get_page(page);
-}
-
-static inline int cfs_page_count(cfs_page_t *page)
-{
-        return page_count(page);
-}
-
-#define cfs_page_index(p)       ((p)->index)
-
-#define cfs_page_pin(page) page_cache_get(page)
-#define cfs_page_unpin(page) page_cache_release(page)
-
-/*
- * Memory allocator
- * XXX Liang: move these declare to public file
- */
-extern void *cfs_alloc(size_t nr_bytes, u_int32_t flags);
-extern void  cfs_free(void *addr);
-
-extern void *cfs_alloc_large(size_t nr_bytes);
-extern void  cfs_free_large(void *addr);
-
-extern cfs_page_t *cfs_alloc_page(unsigned int flags);
-extern void cfs_free_page(cfs_page_t *page);
-
-#define cfs_memory_pressure_get() (current->flags & PF_MEMALLOC)
-#define cfs_memory_pressure_set() do { current->flags |= PF_MEMALLOC; } while (0)
-#define cfs_memory_pressure_clr() do { current->flags &= ~PF_MEMALLOC; } while (0)
+#define memory_pressure_get() (current->flags & PF_MEMALLOC)
+#define memory_pressure_set() do { current->flags |= PF_MEMALLOC; } while (0)
+#define memory_pressure_clr() do { current->flags &= ~PF_MEMALLOC; } while (0)
 
 #if BITS_PER_LONG == 32
 /* limit to lowmem on 32-bit systems */
-#define CFS_NUM_CACHEPAGES \
-        min(cfs_num_physpages, 1UL << (30 - CFS_PAGE_SHIFT) * 3 / 4)
+#define NUM_CACHEPAGES \
+       min(num_physpages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4)
 #else
-#define CFS_NUM_CACHEPAGES cfs_num_physpages
+#define NUM_CACHEPAGES num_physpages
 #endif
 
 /*
  * In Linux there is no way to determine whether current execution context is
  * blockable.
  */
-#define CFS_ALLOC_ATOMIC_TRY   CFS_ALLOC_ATOMIC
+#define ALLOC_ATOMIC_TRY   GFP_ATOMIC
+/* GFP_IOFS was added in 2.6.33 kernel */
+#ifndef GFP_IOFS
+#define GFP_IOFS       (__GFP_IO | __GFP_FS)
+#endif
 
-/*
- * SLAB allocator
- * XXX Liang: move these declare to public file
- */
-typedef struct kmem_cache cfs_mem_cache_t;
-extern cfs_mem_cache_t * cfs_mem_cache_create (const char *, size_t, size_t, unsigned long);
-extern int cfs_mem_cache_destroy ( cfs_mem_cache_t * );
-extern void *cfs_mem_cache_alloc ( cfs_mem_cache_t *, int);
-extern void cfs_mem_cache_free ( cfs_mem_cache_t *, void *);
-extern int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem);
-
-#define CFS_DECL_MMSPACE                mm_segment_t __oldfs
-#define CFS_MMSPACE_OPEN \
+#define DECL_MMSPACE                mm_segment_t __oldfs
+#define MMSPACE_OPEN \
         do { __oldfs = get_fs(); set_fs(get_ds());} while(0)
-#define CFS_MMSPACE_CLOSE               set_fs(__oldfs)
+#define MMSPACE_CLOSE               set_fs(__oldfs)
 
-#define CFS_SLAB_HWCACHE_ALIGN          SLAB_HWCACHE_ALIGN
-#define CFS_SLAB_KERNEL                 SLAB_KERNEL
-#define CFS_SLAB_NOFS                   SLAB_NOFS
 
-/*
- * NUMA allocators
- *
- * NB: we will rename these functions in a separate patch:
- * - rename cfs_alloc to cfs_malloc
- * - rename cfs_alloc/free_page to cfs_page_alloc/free
- * - rename cfs_alloc/free_large to cfs_vmalloc/vfree
- */
 extern void *cfs_cpt_malloc(struct cfs_cpt_table *cptab, int cpt,
                            size_t nr_bytes, unsigned int flags);
 extern void *cfs_cpt_vmalloc(struct cfs_cpt_table *cptab, int cpt,
                             size_t nr_bytes);
-extern cfs_page_t *cfs_page_cpt_alloc(struct cfs_cpt_table *cptab,
+extern struct page *cfs_page_cpt_alloc(struct cfs_cpt_table *cptab,
                                      int cpt, unsigned int flags);
-extern void *cfs_mem_cache_cpt_alloc(cfs_mem_cache_t *cachep,
+extern void *cfs_mem_cache_cpt_alloc(struct kmem_cache *cachep,
                                     struct cfs_cpt_table *cptab,
                                     int cpt, unsigned int flags);
 
 /*
  * Shrinker
  */
-#define cfs_shrinker    shrinker
 
 #ifdef HAVE_SHRINK_CONTROL
 # define SHRINKER_ARGS(sc, nr_to_scan, gfp_mask)  \
@@ -193,10 +122,10 @@ extern void *cfs_mem_cache_cpt_alloc(cfs_mem_cache_t *cachep,
 #endif
 
 #ifdef HAVE_REGISTER_SHRINKER
-typedef int (*cfs_shrinker_t)(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask));
+typedef int (*shrinker_t)(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask));
 
 static inline
-struct cfs_shrinker *cfs_set_shrinker(int seek, cfs_shrinker_t func)
+struct shrinker *set_shrinker(int seek, shrinker_t func)
 {
         struct shrinker *s;
 
@@ -213,7 +142,7 @@ struct cfs_shrinker *cfs_set_shrinker(int seek, cfs_shrinker_t func)
 }
 
 static inline
-void cfs_remove_shrinker(struct cfs_shrinker *shrinker)
+void remove_shrinker(struct shrinker *shrinker)
 {
         if (shrinker == NULL)
                 return;
@@ -221,11 +150,6 @@ void cfs_remove_shrinker(struct cfs_shrinker *shrinker)
         unregister_shrinker(shrinker);
         kfree(shrinker);
 }
-#else
-typedef shrinker_t              cfs_shrinker_t;
-#define cfs_set_shrinker(s, f)  set_shrinker(s, f)
-#define cfs_remove_shrinker(s)  remove_shrinker(s)
 #endif
 
-#define CFS_DEFAULT_SEEKS                 DEFAULT_SEEKS
 #endif /* __LINUX_CFS_MEM_H__ */
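
A registration sketch for the renamed shrinker helpers (a hypothetical shrinker
that reclaims nothing; assumes a kernel where HAVE_REGISTER_SHRINKER is set):

    static int example_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
    {
            return 0;       /* nothing cached, nothing to reclaim */
    }

    static struct shrinker *example_shrinker;

    static void example_shrinker_init(void)
    {
            example_shrinker = set_shrinker(DEFAULT_SEEKS, example_shrink);
    }

    static void example_shrinker_fini(void)
    {
            remove_shrinker(example_shrinker);      /* NULL-safe, see the inline above */
    }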
diff --git a/libcfs/include/libcfs/posix/libcfs.h b/libcfs/include/libcfs/posix/libcfs.h
index 305b74b..4a11e71 100644
@@ -277,21 +277,21 @@ static inline int cfs_module_refcount(cfs_module_t *m)
  *
  ***************************************************************************/
 
-struct cfs_shrinker {
+struct shrinker {
         ;
 };
 
-#define CFS_DEFAULT_SEEKS (0)
+#define DEFAULT_SEEKS (0)
 
-typedef int (*cfs_shrinker_t)(int, unsigned int);
+typedef int (*shrinker_t)(int, unsigned int);
 
 static inline
-struct cfs_shrinker *cfs_set_shrinker(int seeks, cfs_shrinker_t shrink)
+struct shrinker *set_shrinker(int seeks, shrinker_t shrink)
 {
-        return (struct cfs_shrinker *)0xdeadbea1; // Cannot return NULL here
+       return (struct shrinker *)0xdeadbea1; /* Cannot return NULL here */
 }
 
-static inline void cfs_remove_shrinker(struct cfs_shrinker *shrinker)
+static inline void remove_shrinker(struct shrinker *shrinker)
 {
 }
 
diff --git a/libcfs/include/libcfs/user-mem.h b/libcfs/include/libcfs/user-mem.h
index e48b124..8ab9c25 100644
@@ -37,7 +37,7 @@
  */
 #define LIBLUSTRE_HANDLE_UNALIGNED_PAGE
 
-typedef struct page {
+struct page {
         void   *addr;
         unsigned long index;
         cfs_list_t list;
@@ -50,92 +50,129 @@ typedef struct page {
         int     _managed;
 #endif
         cfs_list_t _node;
-} cfs_page_t;
+};
 
 
 /* 4K */
-#define CFS_PAGE_SHIFT 12
-#define CFS_PAGE_SIZE (1UL << CFS_PAGE_SHIFT)
-#define CFS_PAGE_MASK (~((__u64)CFS_PAGE_SIZE-1))
-
-cfs_page_t *cfs_alloc_page(unsigned int flags);
-void cfs_free_page(cfs_page_t *pg);
-void *cfs_page_address(cfs_page_t *pg);
-void *cfs_kmap(cfs_page_t *pg);
-void cfs_kunmap(cfs_page_t *pg);
-
-#define cfs_get_page(p)                        __I_should_not_be_called__(at_all)
-#define cfs_page_count(p)              __I_should_not_be_called__(at_all)
-#define cfs_page_index(p)               ((p)->index)
-#define cfs_page_pin(page) do {} while (0)
-#define cfs_page_unpin(page) do {} while (0)
+#define PAGE_CACHE_SHIFT 12
+#define PAGE_CACHE_SIZE (1UL << PAGE_CACHE_SHIFT)
+#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE-1))
+
+struct page *alloc_page(unsigned int flags);
+void __free_page(struct page *pg);
+void *page_address(struct page *pg);
+void *kmap(struct page *pg);
+void kunmap(struct page *pg);
+
+#define get_page(p)                    __I_should_not_be_called__(at_all)
+#define page_count(p)          __I_should_not_be_called__(at_all)
+#define page_index(p)               ((p)->index)
+#define page_cache_get(page) do { } while (0)
+#define page_cache_release(page) do { } while (0)
 
 /*
  * Memory allocator
  * Inline function, so utils can use them without linking of libcfs
  */
-#define __ALLOC_ZERO    (1 << 2)
-static inline void *cfs_alloc(size_t nr_bytes, u_int32_t flags)
+
+/*
+ * Universal memory allocator API
+ */
+enum cfs_alloc_flags {
+       /* allocation is not allowed to block */
+       GFP_ATOMIC = 0x1,
+       /* allocation is allowed to block */
+       __GFP_WAIT   = 0x2,
+       /* allocation should return zeroed memory */
+       __GFP_ZERO   = 0x4,
+       /* allocation is allowed to call file-system code to free/clean
+        * memory */
+       __GFP_FS     = 0x8,
+       /* allocation is allowed to do io to free/clean memory */
+       __GFP_IO     = 0x10,
+       /* don't report allocation failure to the console */
+       __GFP_NOWARN = 0x20,
+       /* standard allocator flag combination */
+       GFP_IOFS    = __GFP_FS | __GFP_IO,
+       GFP_USER   = __GFP_WAIT | __GFP_FS | __GFP_IO,
+       GFP_NOFS   = __GFP_WAIT | __GFP_IO,
+       GFP_KERNEL = __GFP_WAIT | __GFP_IO | __GFP_FS,
+};
+
+/* flags for cfs_page_alloc() in addition to enum cfs_alloc_flags */
+enum cfs_alloc_page_flags {
+       /* allow to return page beyond KVM. It has to be mapped into KVM by
+        * kmap() and unmapped with kunmap(). */
+       __GFP_HIGHMEM  = 0x40,
+       GFP_HIGHUSER = __GFP_WAIT | __GFP_FS | __GFP_IO |
+                            __GFP_HIGHMEM,
+};
+
+static inline void *kmalloc(size_t nr_bytes, u_int32_t flags)
 {
-        void *result;
+       void *result;
 
-        result = malloc(nr_bytes);
-        if (result != NULL && (flags & __ALLOC_ZERO))
-                memset(result, 0, nr_bytes);
-        return result;
+       result = malloc(nr_bytes);
+       if (result != NULL && (flags & __GFP_ZERO))
+               memset(result, 0, nr_bytes);
+       return result;
 }
 
-#define cfs_free(addr)  free(addr)
-#define cfs_alloc_large(nr_bytes) cfs_alloc(nr_bytes, 0)
-#define cfs_free_large(addr) cfs_free(addr)
+#define kfree(addr)  free(addr)
+#define vmalloc(nr_bytes) kmalloc(nr_bytes, 0)
+#define vfree(addr) free(addr)
 
-#define CFS_ALLOC_ATOMIC_TRY   (0)
+#define ALLOC_ATOMIC_TRY   (0)
 /*
  * SLAB allocator
  */
-typedef struct {
+struct kmem_cache {
          int size;
-} cfs_mem_cache_t;
+};
 
-#define CFS_SLAB_HWCACHE_ALIGN 0
+#define SLAB_HWCACHE_ALIGN 0
 #define SLAB_DESTROY_BY_RCU 0
-#define CFS_SLAB_KERNEL 0
-#define CFS_SLAB_NOFS 0
+#define SLAB_KERNEL 0
+#define SLAB_NOFS 0
+
+#define memory_pressure_get() (0)
+#define memory_pressure_set() do {} while (0)
+#define memory_pressure_clr() do {} while (0)
 
-cfs_mem_cache_t *
-cfs_mem_cache_create(const char *, size_t, size_t, unsigned long);
-int cfs_mem_cache_destroy(cfs_mem_cache_t *c);
-void *cfs_mem_cache_alloc(cfs_mem_cache_t *c, int gfp);
-void cfs_mem_cache_free(cfs_mem_cache_t *c, void *addr);
-int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem);
+struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+                                    unsigned long, void *);
+void kmem_cache_destroy(struct kmem_cache *c);
+void *kmem_cache_alloc(struct kmem_cache *c, int gfp);
+void kmem_cache_free(struct kmem_cache *c, void *addr);
+int kmem_is_in_cache(const void *addr, const struct kmem_cache *kmem);
 
 /*
  * NUMA allocators
  */
 #define cfs_cpt_malloc(cptab, cpt, bytes, flags)       \
-       cfs_alloc(bytes, flags)
+       kmalloc(bytes, flags)
 #define cfs_cpt_vmalloc(cptab, cpt, bytes)             \
-       cfs_alloc(bytes)
+       kmalloc(bytes)
 #define cfs_page_cpt_alloc(cptab, cpt, mask)           \
-       cfs_alloc_page(mask)
+       alloc_page(mask)
 #define cfs_mem_cache_cpt_alloc(cache, cptab, cpt, gfp)        \
-       cfs_mem_cache_alloc(cache, gfp)
+       kmem_cache_alloc(cache, gfp)
 
 #define smp_rmb()      do {} while (0)
 
 /*
  * Copy to/from user
  */
-static inline int cfs_copy_from_user(void *a,void *b, int c)
+static inline int copy_from_user(void *a, void *b, int c)
 {
-        memcpy(a,b,c);
-        return 0;
+       memcpy(a, b, c);
+       return 0;
 }
 
-static inline int cfs_copy_to_user(void *a,void *b, int c)
+static inline int copy_to_user(void *a, void *b, int c)
 {
-        memcpy(a,b,c);
-        return 0;
+       memcpy(a,b,c);
+       return 0;
 }
 
 #endif
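
In this userspace emulation the GFP flags are accepted but only __GFP_ZERO
changes behaviour; everything maps onto malloc()/free(). A trivial illustration
(hypothetical, userspace builds only):

    static void example_user_alloc(void)
    {
            char *buf = kmalloc(4096, __GFP_ZERO);  /* malloc() followed by memset() */

            if (buf != NULL)
                    kfree(buf);                     /* plain free() */
    }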
diff --git a/libcfs/include/libcfs/winnt/portals_utils.h b/libcfs/include/libcfs/winnt/portals_utils.h
index e5fc164..927f515 100644
@@ -265,16 +265,16 @@ static inline void read_random(char *buf, int len)
        ((unsigned char *)&addr)[1],    \
        ((unsigned char *)&addr)[0]
 
-static int cfs_copy_from_user(void *to, void *from, int c) 
+static int copy_from_user(void *to, void *from, int c)
 {
-    memcpy(to, from, c);
-    return 0;
+       memcpy(to, from, c);
+       return 0;
 }
 
-static int cfs_copy_to_user(void *to, const void *from, int c) 
+static int copy_to_user(void *to, const void *from, int c)
 {
-    memcpy(to, from, c);
-    return 0;
+       memcpy(to, from, c);
+       return 0;
 }
 
 static unsigned long
@@ -297,8 +297,8 @@ clear_user(void __user *to, unsigned long n)
     0                           \
 )
 
-#define cfs_num_physpages               (64 * 1024)
-#define CFS_NUM_CACHEPAGES              cfs_num_physpages
+#define num_physpages               (64 * 1024)
+#define NUM_CACHEPAGES              num_physpages
 
 #else
 
diff --git a/libcfs/include/libcfs/winnt/winnt-mem.h b/libcfs/include/libcfs/winnt/winnt-mem.h
index 2435915..e13dd59 100644
 
 #ifdef __KERNEL__
 
-typedef struct cfs_mem_cache cfs_mem_cache_t;
-
 /*
  * page definitions
  */
 
-#define CFS_PAGE_SIZE                   PAGE_SIZE
-#define CFS_PAGE_SHIFT                  PAGE_SHIFT
+#define PAGE_CACHE_SIZE                   PAGE_SIZE
+#define PAGE_CACHE_SHIFT                  PAGE_SHIFT
 #define CFS_PAGE_MASK                   (~(PAGE_SIZE - 1))
 
-typedef struct cfs_page {
+#define memory_pressure_get() (0)
+#define memory_pressure_set() do {} while (0)
+#define memory_pressure_clr() do {} while (0)
+
+struct page {
     void *          addr;
     cfs_atomic_t    count;
     void *          private;
     void *          mapping;
     __u32           index;
     __u32           flags;
-} cfs_page_t;
+};
 
 #define page cfs_page
 
@@ -146,90 +148,115 @@ typedef struct cfs_page {
 #define TestClearPageWriteback(page) test_and_clear_bit(PG_writeback,  \
                                                        &(page)->flags)
 
-#define __GFP_FS    (1)
-#define GFP_KERNEL  (2)
-#define GFP_ATOMIC  (4)
+/*
+ * Universal memory allocator API
+ */
+enum cfs_alloc_flags {
+       /* allocation is not allowed to block */
+       GFP_ATOMIC = 0x1,
+       /* allocation is allowed to block */
+       __GFP_WAIT   = 0x2,
+       /* allocation should return zeroed memory */
+       __GFP_ZERO   = 0x4,
+       /* allocation is allowed to call file-system code to free/clean
+        * memory */
+       __GFP_FS     = 0x8,
+       /* allocation is allowed to do io to free/clean memory */
+       __GFP_IO     = 0x10,
+       /* don't report allocation failure to the console */
+       __GFP_NOWARN = 0x20,
+       /* standard allocator flag combination */
+       GFP_IOFS    = __GFP_FS | __GFP_IO,
+       GFP_USER   = __GFP_WAIT | __GFP_FS | __GFP_IO,
+       GFP_NOFS   = __GFP_WAIT | __GFP_IO,
+       GFP_KERNEL = __GFP_WAIT | __GFP_IO | __GFP_FS,
+};
+
+/* flags for cfs_page_alloc() in addition to enum cfs_alloc_flags */
+enum cfs_alloc_page_flags {
+       /* allow to return page beyond KVM. It has to be mapped into KVM by
+        * kmap() and unmapped with kunmap(). */
+       __GFP_HIGHMEM  = 0x40,
+       GFP_HIGHUSER = __GFP_WAIT | __GFP_FS | __GFP_IO |
+                            __GFP_HIGHMEM,
+};
 
-cfs_page_t *cfs_alloc_page(int flags);
-void cfs_free_page(cfs_page_t *pg);
-void cfs_release_page(cfs_page_t *pg);
-cfs_page_t * virt_to_page(void * addr);
-int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem);
+struct page *alloc_page(int flags);
+void __free_page(struct page *pg);
+void cfs_release_page(struct page *pg);
+struct page *virt_to_page(void *addr);
 
 #define page_cache_get(a) do {} while (0)
 #define page_cache_release(a) do {} while (0)
 
-static inline void *cfs_page_address(cfs_page_t *page)
+static inline void *page_address(struct page *page)
 {
     return page->addr;
 }
 
-static inline void *cfs_kmap(cfs_page_t *page)
+static inline void *kmap(struct page *page)
 {
     return page->addr;
 }
 
-static inline void cfs_kunmap(cfs_page_t *page)
+static inline void kunmap(struct page *page)
 {
     return;
 }
 
-static inline void cfs_get_page(cfs_page_t *page)
+static inline void get_page(struct page *page)
 {
     cfs_atomic_inc(&page->count);
 }
 
-static inline void cfs_put_page(cfs_page_t *page)
+static inline void cfs_put_page(struct page *page)
 {
     cfs_atomic_dec(&page->count);
 }
 
-static inline int cfs_page_count(cfs_page_t *page)
+static inline int page_count(struct page *page)
 {
     return cfs_atomic_read(&page->count);
 }
 
-#define cfs_page_index(p)       ((p)->index)
+#define page_index(p)       ((p)->index)
 
 /*
  * Memory allocator
  */
 
-#define CFS_ALLOC_ATOMIC_TRY   (0)
-extern void *cfs_alloc(size_t nr_bytes, u_int32_t flags);
-extern void  cfs_free(void *addr);
-
-#define kmalloc cfs_alloc
-
-extern void *cfs_alloc_large(size_t nr_bytes);
-extern void  cfs_free_large(void *addr);
+#define ALLOC_ATOMIC_TRY       (0)
+extern void *kmalloc(size_t nr_bytes, u_int32_t flags);
+extern void  kfree(void *addr);
+extern void *vmalloc(size_t nr_bytes);
+extern void  vfree(void *addr);
 
 /*
  * SLAB allocator
  */
 
-#define CFS_SLAB_HWCACHE_ALIGN         0
+#define SLAB_HWCACHE_ALIGN             0
 
 /* The cache name is limited to 20 chars */
 
-struct cfs_mem_cache {
+struct kmem_cache {
     char                    name[20];
     ulong_ptr_t             flags;
     NPAGED_LOOKASIDE_LIST   npll;
 };
 
 
-extern cfs_mem_cache_t *cfs_mem_cache_create (const char *, size_t, size_t,
-                                              unsigned long);
-extern int cfs_mem_cache_destroy (cfs_mem_cache_t * );
-extern void *cfs_mem_cache_alloc (cfs_mem_cache_t *, int);
-extern void cfs_mem_cache_free (cfs_mem_cache_t *, void *);
+extern struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
+                                           unsigned long, void *);
+extern kmem_cache_destroy(struct kmem_cache *);
+extern void *kmem_cache_alloc(struct kmem_cache *, int);
+extern void kmem_cache_free(struct kmem_cache *, void *);
 
 /*
  * shrinker 
  */
 typedef int (*shrink_callback)(int nr_to_scan, gfp_t gfp_mask);
-struct cfs_shrinker {
+struct shrinker {
         shrink_callback cb;
        int seeks;      /* seeks to recreate an obj */
 
@@ -238,8 +265,8 @@ struct cfs_shrinker {
        long nr;        /* objs pending delete */
 };
 
-struct cfs_shrinker *cfs_set_shrinker(int seeks, shrink_callback cb);
-void cfs_remove_shrinker(struct cfs_shrinker *s);
+struct shrinker *set_shrinker(int seeks, shrink_callback cb);
+void remove_shrinker(struct shrinker *s);
 
 int start_shrinker_timer();
 void stop_shrinker_timer();
@@ -248,13 +275,13 @@ void stop_shrinker_timer();
  * Page allocator slabs 
  */
 
-extern cfs_mem_cache_t *cfs_page_t_slab;
-extern cfs_mem_cache_t *cfs_page_p_slab;
+extern struct kmem_cache *cfs_page_t_slab;
+extern struct kmem_cache *cfs_page_p_slab;
 
 
-#define CFS_DECL_MMSPACE
-#define CFS_MMSPACE_OPEN    do {} while(0)
-#define CFS_MMSPACE_CLOSE   do {} while(0)
+#define DECL_MMSPACE
+#define MMSPACE_OPEN    do {} while (0)
+#define MMSPACE_CLOSE   do {} while (0)
 
 
 #define cfs_mb()     do {} while(0)
@@ -265,7 +292,7 @@ extern cfs_mem_cache_t *cfs_page_p_slab;
  * MM defintions from (linux/mm.h)
  */
 
-#define CFS_DEFAULT_SEEKS 2 /* shrink seek */
+#define DEFAULT_SEEKS 2 /* shrink seek */
 
 #else  /* !__KERNEL__ */
 
diff --git a/libcfs/include/libcfs/winnt/winnt-prim.h b/libcfs/include/libcfs/winnt/winnt-prim.h
index e234bba..1f98fd5 100644
@@ -485,20 +485,21 @@ static __inline cfs_group_info_t *cfs_groups_alloc(int gidsetsize)
 {
     cfs_group_info_t * groupinfo;
     KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__, __FUNCTION__));
-    groupinfo =
-        (cfs_group_info_t *)cfs_alloc(sizeof(cfs_group_info_t), 0);
+    groupinfo = kmalloc(sizeof(cfs_group_info_t), 0);
 
     if (groupinfo) {
         memset(groupinfo, 0, sizeof(cfs_group_info_t));
     }
     return groupinfo;
 }
+
 static __inline void cfs_groups_free(cfs_group_info_t *group_info)
 {
-    KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__,
-            __FUNCTION__));
-    cfs_free(group_info);
+       KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__,
+               __FUNCTION__));
+       kfree(group_info);
 }
+
 static __inline int
 cfs_set_current_groups(cfs_group_info_t *group_info)
 {
@@ -506,6 +507,7 @@ cfs_set_current_groups(cfs_group_info_t *group_info)
              __FUNCTION__));
     return 0;
 }
+
 static __inline int groups_search(cfs_group_info_t *group_info,
                                   gid_t grp) {
     KdPrint(("%s(%d): %s NOT implemented.\n", __FILE__, __LINE__,
@@ -597,7 +599,7 @@ typedef struct _TASK_MAN {
 
        spinlock_t      Lock;           /* Protection lock */
 
-       cfs_mem_cache_t *slab;          /* Memory slab for task slot */
+       struct kmem_cache       *slab;          /* Memory slab for task slot */
 
        ULONG           NumOfTasks;     /* Total tasks (threads) */
        LIST_ENTRY      TaskList;       /* List of task slots */
diff --git a/libcfs/include/libcfs/winnt/winnt-tcpip.h b/libcfs/include/libcfs/winnt/winnt-tcpip.h
index 74c4c74..85c5695 100644
@@ -639,14 +639,14 @@ typedef struct {
 
        int             ksnd_ntconns;           /* number of tconns in list */
        cfs_list_t      ksnd_tconns;            /* tdi connections list */
-       cfs_mem_cache_t *ksnd_tconn_slab;       /* ks_tconn_t allocation slabs*/
+       struct kmem_cache *ksnd_tconn_slab;     /* ks_tconn_t allocation slabs*/
        event_t         ksnd_tconn_exit;        /* event signal by last tconn */
 
        spinlock_t      ksnd_tsdu_lock;         /* tsdu access serialise */
 
     int                   ksnd_ntsdus;          /* number of tsdu buffers allocated */
     ulong                 ksnd_tsdu_size;       /* the size of a signel tsdu buffer */
-    cfs_mem_cache_t       *ksnd_tsdu_slab;       /* slab cache for tsdu buffer allocation */
+       struct kmem_cache       *ksnd_tsdu_slab; /* slab cache for tsdu buffer allocation */
 
     int                   ksnd_nfreetsdus;      /* number of tsdu buffers in the freed list */
     cfs_list_t            ksnd_freetsdus;       /* List of the freed Tsdu buffer. */
diff --git a/libcfs/libcfs/darwin/darwin-mem.c b/libcfs/libcfs/darwin/darwin-mem.c
index 6d57c25..333e010 100644
@@ -61,9 +61,9 @@ struct cfs_zone_nob {
 static struct cfs_zone_nob      cfs_zone_nob;
 static spinlock_t              cfs_zone_guard;
 
-cfs_mem_cache_t *mem_cache_find(const char *name, size_t objsize)
+struct kmem_cache *mem_cache_find(const char *name, size_t objsize)
 {
-       cfs_mem_cache_t         *walker = NULL;
+       struct kmem_cache               *walker = NULL;
 
        LASSERT(cfs_zone_nob.z_nob != NULL);
 
@@ -85,12 +85,12 @@ cfs_mem_cache_t *mem_cache_find(const char *name, size_t objsize)
  * survives kext unloading, so that @name cannot be just static string
  * embedded into kext image.
  */
-cfs_mem_cache_t *mem_cache_create(vm_size_t objsize, const char *name)
+struct kmem_cache *mem_cache_create(vm_size_t objsize, const char *name)
 {
-       cfs_mem_cache_t *mc = NULL;
+       struct kmem_cache       *mc = NULL;
         char *cname;
 
-       MALLOC(mc, cfs_mem_cache_t *, sizeof(cfs_mem_cache_t), M_TEMP, M_WAITOK|M_ZERO);
+       MALLOC(mc, struct kmem_cache *, sizeof(struct kmem_cache), M_TEMP, M_WAITOK|M_ZERO);
        if (mc == NULL){
                CERROR("cfs_mem_cache created fail!\n");
                return NULL;
@@ -105,7 +105,7 @@ cfs_mem_cache_t *mem_cache_create(vm_size_t objsize, const char *name)
         return mc;
 }
 
-void mem_cache_destroy(cfs_mem_cache_t *mc)
+void mem_cache_destroy(struct kmem_cache *mc)
 {
         /*
          * zone can NOT be destroyed after creating, 
@@ -128,17 +128,17 @@ void mem_cache_destroy(cfs_mem_cache_t *mc)
 
 #else  /* !CFS_INDIVIDUAL_ZONE */
 
-cfs_mem_cache_t *
+struct kmem_cache *
 mem_cache_find(const char *name, size_t objsize)
 {
         return NULL;
 }
 
-cfs_mem_cache_t *mem_cache_create(vm_size_t size, const char *name)
+struct kmem_cache *mem_cache_create(vm_size_t size, const char *name)
 {
-        cfs_mem_cache_t *mc = NULL;
+       struct kmem_cache *mc = NULL;
 
-       MALLOC(mc, cfs_mem_cache_t *, sizeof(cfs_mem_cache_t), M_TEMP, M_WAITOK|M_ZERO);
+       MALLOC(mc, struct kmem_cache *, sizeof(struct kmem_cache), M_TEMP, M_WAITOK|M_ZERO);
        if (mc == NULL){
                CERROR("cfs_mem_cache created fail!\n");
                return NULL;
@@ -148,7 +148,7 @@ cfs_mem_cache_t *mem_cache_create(vm_size_t size, const char *name)
         return mc;
 }
 
-void mem_cache_destroy(cfs_mem_cache_t *mc)
+void mem_cache_destroy(struct kmem_cache *mc)
 {
         OSMalloc_Tagfree(mc->mc_cache);
         FREE(mc, M_TEMP);
@@ -160,45 +160,45 @@ void mem_cache_destroy(cfs_mem_cache_t *mc)
 
 #endif /* !CFS_INDIVIDUAL_ZONE */
 
-cfs_mem_cache_t *
-cfs_mem_cache_create (const char *name,
-                      size_t objsize, size_t off, unsigned long arg1)
+struct kmem_cache *
+kmem_cache_create(const char *name, size_t objsize, size_t off,
+                 unsigned long arg1, void *ctor)
 {
-        cfs_mem_cache_t *mc;
+       struct kmem_cache *mc;
 
-        mc = mem_cache_find(name, objsize);
-        if (mc)
-                return mc;
-        mc = mem_cache_create(objsize, name);
+       mc = mem_cache_find(name, objsize);
+       if (mc)
+               return mc;
+       mc = mem_cache_create(objsize, name);
        return mc;
 }
 
-int cfs_mem_cache_destroy (cfs_mem_cache_t *cachep)
+void kmem_cache_destroy(struct kmem_cache *cachep)
 {
-        mem_cache_destroy(cachep);
-        return 0;
+       mem_cache_destroy(cachep);
 }
 
-void *cfs_mem_cache_alloc (cfs_mem_cache_t *cachep, int flags)
+void *kmem_cache_alloc (struct kmem_cache *cachep, int flags)
 {
-        void *result;
+       void *result;
 
-        /* zalloc_canblock() is not exported... Emulate it. */
-        if (flags & CFS_ALLOC_ATOMIC) {
-                result = (void *)mem_cache_alloc_nb(cachep);
-        } else {
-                LASSERT(get_preemption_level() == 0);
-                result = (void *)mem_cache_alloc(cachep);
-        }
-        if (result != NULL && (flags & CFS_ALLOC_ZERO))
-                memset(result, 0, cachep->mc_size);
+       /* zalloc_canblock() is not exported... Emulate it. */
+       if (flags & GFP_ATOMIC) {
+               result = (void *)mem_cache_alloc_nb(cachep);
+       } else {
+               LASSERT(get_preemption_level() == 0);
+               result = (void *)mem_cache_alloc(cachep);
+       }
+       if (result != NULL && (flags & __GFP_ZERO))
+               memset(result, 0, cachep->mc_size);
 
-        return result;
+       return result;
 }
 
-void cfs_mem_cache_free (cfs_mem_cache_t *cachep, void *objp)
+void kmem_cache_free (struct kmem_cache *cachep, void *objp)
 {
-        mem_cache_free(cachep, objp);
+       mem_cache_free(cachep, objp);
 }
 
 /* ---------------------------------------------------------------------------
@@ -210,8 +210,8 @@ void cfs_mem_cache_free (cfs_mem_cache_t *cachep, void *objp)
  * "Raw" pages
  */
 
-static unsigned int raw_pages = 0;
-static cfs_mem_cache_t  *raw_page_cache = NULL;
+static unsigned int raw_pages;
+static struct kmem_cache  *raw_page_cache;
 
 static struct xnu_page_ops raw_page_ops;
 static struct xnu_page_ops *page_ops[XNU_PAGE_NTYPES] = {
@@ -219,35 +219,35 @@ static struct xnu_page_ops *page_ops[XNU_PAGE_NTYPES] = {
 };
 
 #if defined(LIBCFS_DEBUG)
-static int page_type_is_valid(cfs_page_t *page)
+static int page_type_is_valid(struct page *page)
 {
-        LASSERT(page != NULL);
-        return 0 <= page->type && page->type < XNU_PAGE_NTYPES;
+       LASSERT(page != NULL);
+       return 0 <= page->type && page->type < XNU_PAGE_NTYPES;
 }
 
-static int page_is_raw(cfs_page_t *page)
+static int page_is_raw(struct page *page)
 {
-        return page->type == XNU_PAGE_RAW;
+       return page->type == XNU_PAGE_RAW;
 }
 #endif
 
-static struct xnu_raw_page *as_raw(cfs_page_t *page)
+static struct xnu_raw_page *as_raw(struct page *page)
 {
-        LASSERT(page_is_raw(page));
-        return list_entry(page, struct xnu_raw_page, header);
+       LASSERT(page_is_raw(page));
+       return list_entry(page, struct xnu_raw_page, header);
 }
 
-static void *raw_page_address(cfs_page_t *pg)
+static void *raw_page_address(struct page *pg)
 {
-        return (void *)as_raw(pg)->virtual;
+       return (void *)as_raw(pg)->virtual;
 }
 
-static void *raw_page_map(cfs_page_t *pg)
+static void *raw_page_map(struct page *pg)
 {
-        return (void *)as_raw(pg)->virtual;
+       return (void *)as_raw(pg)->virtual;
 }
 
-static void raw_page_unmap(cfs_page_t *pg)
+static void raw_page_unmap(struct page *pg)
 {
 }
 
@@ -264,10 +264,10 @@ spinlock_t page_death_row_phylax;
 
 static void raw_page_finish(struct xnu_raw_page *pg)
 {
-        -- raw_pages;
-        if (pg->virtual != NULL)
-                cfs_mem_cache_free(raw_page_cache, pg->virtual);
-        cfs_free(pg);
+       --raw_pages;
+       if (pg->virtual != NULL)
+               kmem_cache_free(raw_page_cache, pg->virtual);
+       kfree(pg);
 }
 
 void raw_page_death_row_clean(void)
@@ -294,7 +294,7 @@ void free_raw_page(struct xnu_raw_page *pg)
        /*
         * kmem_free()->vm_map_remove()->vm_map_delete()->lock_write() may
         * block. (raw_page_done()->upl_abort() can block too) On the other
-        * hand, cfs_free_page() may be called in non-blockable context. To
+        * hand, __free_page() may be called in non-blockable context. To
         * work around this, park pages on global list when cannot block.
         */
        if (get_preemption_level() > 0) {
@@ -307,74 +307,74 @@ void free_raw_page(struct xnu_raw_page *pg)
        }
 }
 
-cfs_page_t *cfs_alloc_page(u_int32_t flags)
+struct page *alloc_page(u_int32_t flags)
 {
-        struct xnu_raw_page *page;
+       struct xnu_raw_page *page;
 
-        /*
-         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
-         * from here: this will lead to infinite recursion.
-         */
+       /*
+        * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
+        * from here: this will lead to infinite recursion.
+        */
 
-        page = cfs_alloc(sizeof *page, flags);
-        if (page != NULL) {
-                page->virtual = cfs_mem_cache_alloc(raw_page_cache, flags);
-                if (page->virtual != NULL) {
-                        ++ raw_pages;
-                        page->header.type = XNU_PAGE_RAW;
-                        atomic_set(&page->count, 1);
-                } else {
-                        cfs_free(page);
-                        page = NULL;
-                }
-        }
-        return page != NULL ? &page->header : NULL;
+       page = kmalloc(sizeof *page, flags);
+       if (page != NULL) {
+               page->virtual = kmem_cache_alloc(raw_page_cache, flags);
+               if (page->virtual != NULL) {
+                       ++raw_pages;
+                       page->header.type = XNU_PAGE_RAW;
+                       atomic_set(&page->count, 1);
+               } else {
+                       kfree(page);
+                       page = NULL;
+               }
+       }
+       return page != NULL ? &page->header : NULL;
 }
 
-void cfs_free_page(cfs_page_t *pages)
+void __free_page(struct page *pages)
 {
-        free_raw_page(as_raw(pages));
+       free_raw_page(as_raw(pages));
 }
 
-void cfs_get_page(cfs_page_t *p)
+void get_page(struct page *p)
 {
-        atomic_inc(&as_raw(p)->count);
+       atomic_inc(&as_raw(p)->count);
 }
 
-int cfs_put_page_testzero(cfs_page_t *p)
+int cfs_put_page_testzero(struct page *p)
 {
        return atomic_dec_and_test(&as_raw(p)->count);
 }
 
-int cfs_page_count(cfs_page_t *p)
+int page_count(struct page *p)
 {
-        return atomic_read(&as_raw(p)->count);
+       return atomic_read(&as_raw(p)->count);
 }
 
 /*
  * Generic page operations
  */
 
-void *cfs_page_address(cfs_page_t *pg)
+void *page_address(struct page *pg)
 {
-        /*
-         * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
-         * from here: this will lead to infinite recursion.
-         */
-        LASSERT(page_type_is_valid(pg));
-        return page_ops[pg->type]->page_address(pg);
+       /*
+        * XXX nikita: do NOT call libcfs_debug_msg() (CDEBUG/ENTRY/EXIT)
+        * from here: this will lead to infinite recursion.
+        */
+       LASSERT(page_type_is_valid(pg));
+       return page_ops[pg->type]->page_address(pg);
 }
 
-void *cfs_kmap(cfs_page_t *pg)
+void *kmap(struct page *pg)
 {
-        LASSERT(page_type_is_valid(pg));
-        return page_ops[pg->type]->page_map(pg);
+       LASSERT(page_type_is_valid(pg));
+       return page_ops[pg->type]->page_map(pg);
 }
 
-void cfs_kunmap(cfs_page_t *pg)
+void kunmap(struct page *pg)
 {
-        LASSERT(page_type_is_valid(pg));
-        page_ops[pg->type]->page_unmap(pg);
+       LASSERT(page_type_is_valid(pg));
+       page_ops[pg->type]->page_unmap(pg);
 }
 
 void xnu_page_ops_register(int type, struct xnu_page_ops *ops)
@@ -403,39 +403,39 @@ extern int get_preemption_level(void);
 #define get_preemption_level() (0)
 #endif
 
-void *cfs_alloc(size_t nr_bytes, u_int32_t flags)
+void *kmalloc(size_t nr_bytes, u_int32_t flags)
 {
-        int mflags;
+       int mflags;
 
-        mflags = 0;
-        if (flags & CFS_ALLOC_ATOMIC) {
-                mflags |= M_NOWAIT;
-        } else {
-                LASSERT(get_preemption_level() == 0);
-                mflags |= M_WAITOK;
-        }
+       mflags = 0;
+       if (flags & GFP_ATOMIC) {
+               mflags |= M_NOWAIT;
+       } else {
+               LASSERT(get_preemption_level() == 0);
+               mflags |= M_WAITOK;
+       }
 
-        if (flags & CFS_ALLOC_ZERO)
-                mflags |= M_ZERO;
+       if (flags & __GFP_ZERO)
+               mflags |= M_ZERO;
 
-        return _MALLOC(nr_bytes, M_TEMP, mflags);
+       return _MALLOC(nr_bytes, M_TEMP, mflags);
 }
 
-void cfs_free(void *addr)
+void kfree(void *addr)
 {
-        return _FREE(addr, M_TEMP);
+       return _FREE(addr, M_TEMP);
 }
 
-void *cfs_alloc_large(size_t nr_bytes)
+void *vmalloc(size_t nr_bytes)
 {
-        LASSERT(get_preemption_level() == 0);
-        return _MALLOC(nr_bytes, M_TEMP, M_WAITOK);
+       LASSERT(get_preemption_level() == 0);
+       return _MALLOC(nr_bytes, M_TEMP, M_WAITOK);
 }
 
-void  cfs_free_large(void *addr)
+void  vfree(void *addr)
 {
-        LASSERT(get_preemption_level() == 0);
-        return _FREE(addr, M_TEMP);
+       LASSERT(get_preemption_level() == 0);
+       return _FREE(addr, M_TEMP);
 }
 
 /*
@@ -477,7 +477,8 @@ int cfs_mem_init(void)
 #endif
        CFS_INIT_LIST_HEAD(&page_death_row);
        spin_lock_init(&page_death_row_phylax);
-       raw_page_cache = cfs_mem_cache_create("raw-page", CFS_PAGE_SIZE, 0, 0);
+       raw_page_cache = kmem_cache_create("raw-page", PAGE_CACHE_SIZE,
+                                          0, 0, NULL);
        return 0;
 }
 
@@ -485,7 +486,7 @@ void cfs_mem_fini(void)
 {
        raw_page_death_row_clean();
        spin_lock_done(&page_death_row_phylax);
-       cfs_mem_cache_destroy(raw_page_cache);
+       kmem_cache_destroy(raw_page_cache);
 
 #if CFS_INDIVIDUAL_ZONE
        cfs_zone_nob.z_nob = NULL;
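
The Darwin emulation above now exports the slab API under the kernel's own
names, with the kernel's five-argument kmem_cache_create() prototype and a
void-returning kmem_cache_destroy(). Callers across the tree therefore follow
the usual kernel pattern, as in this hedged sketch (hypothetical foo_cache and
foo_cache_init/fini names, not taken from the patch):

#include <linux/errno.h>
#include <linux/slab.h>

struct foo {
	int f_val;
};

static struct kmem_cache *foo_cache;

static int foo_cache_init(void)
{
	/* name, object size, alignment, slab flags, constructor */
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      0, 0, NULL);
	if (foo_cache == NULL)
		return -ENOMEM;
	return 0;
}

static void foo_cache_fini(void)
{
	/* the kernel API returns void, so there is no rc to check */
	kmem_cache_destroy(foo_cache);
	foo_cache = NULL;
}
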
index b91e9dc..406eb7e 100644 (file)
@@ -179,9 +179,9 @@ libcfs_ipif_enumerate (char ***namesp)
         nalloc = 16;    /* first guess at max interfaces */
         toobig = 0;
         for (;;) {
-                if (nalloc * sizeof(*ifr) > CFS_PAGE_SIZE) {
+               if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) {
                         toobig = 1;
-                        nalloc = CFS_PAGE_SIZE/sizeof(*ifr);
+                       nalloc = PAGE_CACHE_SIZE/sizeof(*ifr);
                         CWARN("Too many interfaces: only enumerating first %d\n",
                               nalloc);
                 }
@@ -821,9 +821,9 @@ libcfs_ipif_enumerate (char ***namesp)
         nalloc = 16;    /* first guess at max interfaces */
         toobig = 0;
         for (;;) {
-                if (nalloc * sizeof(*ifr) > CFS_PAGE_SIZE) {
+               if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) {
                         toobig = 1;
-                        nalloc = CFS_PAGE_SIZE/sizeof(*ifr);
+                       nalloc = PAGE_CACHE_SIZE/sizeof(*ifr);
                         CWARN("Too many interfaces: only enumerating first %d\n",
                               nalloc);
                 }
index 0ecce6d..f8832a0 100644 (file)
@@ -122,7 +122,7 @@ struct trace_cpu_data *trace_get_tcd(void)
        tcd = &trace_data[0].tcd;
         CFS_INIT_LIST_HEAD(&pages);
        if (get_preemption_level() == 0)
-               nr_pages = trace_refill_stock(tcd, CFS_ALLOC_STD, &pages);
+               nr_pages = trace_refill_stock(tcd, GFP_IOFS, &pages);
        else
                nr_pages = 0;
        spin_lock(&trace_cpu_serializer);
index c80af9c..cfa027e 100644 (file)
@@ -414,7 +414,7 @@ int libcfs_debug_init(unsigned long bufsize)
                 max = TCD_MAX_PAGES;
         } else {
                 max = (max / cfs_num_possible_cpus());
-                max = (max << (20 - CFS_PAGE_SHIFT));
+               max = (max << (20 - PAGE_CACHE_SHIFT));
         }
         rc = cfs_tracefile_init(max);
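
The "20 - PAGE_CACHE_SHIFT" expressions above convert between megabytes and
pages. A standalone arithmetic sketch, assuming 4 KiB pages (so the shift
count is 8):

#include <stdio.h>

int main(void)
{
	const unsigned int page_shift = 12;	/* assumption: 4 KiB pages */
	unsigned long mb = 5;

	unsigned long pages = mb << (20 - page_shift);		/* MB -> pages */
	unsigned long back  = pages >> (20 - page_shift);	/* pages -> MB */

	printf("%lu MB = %lu pages = %lu MB\n", mb, pages, back);
	return 0;	/* prints: 5 MB = 1280 pages = 5 MB */
}
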
 
index 3a4f168..f9362ea 100644 (file)
@@ -41,7 +41,7 @@
 do {                                                                   \
        if ((h)->cbh_flags & CBH_FLAG_ATOMIC_GROW)                      \
                LIBCFS_CPT_ALLOC_GFP((ptr), h->cbh_cptab, h->cbh_cptid, \
-                                    CBH_NOB, CFS_ALLOC_ATOMIC);        \
+                                    CBH_NOB, GFP_ATOMIC);              \
        else                                                            \
                LIBCFS_CPT_ALLOC((ptr), h->cbh_cptab, h->cbh_cptid,     \
                                 CBH_NOB);                              \
index 08974d1..4cac2e5 100644 (file)
@@ -215,7 +215,7 @@ int libcfs_kkuc_group_add(struct file *filp, int uid, int group, __u32 data)
                 return -EBADF;
 
         /* freed in group_rem */
-        reg = cfs_alloc(sizeof(*reg), 0);
+       reg = kmalloc(sizeof(*reg), 0);
         if (reg == NULL)
                 return -ENOMEM;
 
@@ -262,7 +262,7 @@ int libcfs_kkuc_group_rem(int uid, int group)
                                reg->kr_uid, reg->kr_fp, group);
                         if (reg->kr_fp != NULL)
                                fput(reg->kr_fp);
-                       cfs_free(reg);
+                       kfree(reg);
                }
        }
        up_write(&kg_sem);
index db376b5..73276e7 100644 (file)
@@ -140,7 +140,7 @@ char *cfs_strdup(const char *str, u_int32_t flags)
 
         lenz = strlen(str) + 1;
 
-        dup_str = cfs_alloc(lenz, flags);
+       dup_str = kmalloc(lenz, flags);
         if (dup_str == NULL)
                 return NULL;
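
cfs_strdup() keeps its portable strlen()-plus-kmalloc() form; on Linux proper
the same job is done by the kernel's kstrdup(). A one-line sketch of that
equivalence (hypothetical example_strdup wrapper, not a change made by this
patch):

#include <linux/slab.h>
#include <linux/string.h>

/* kstrdup() duplicates a NUL-terminated string with the given GFP flags
 * and returns NULL on allocation failure */
static char *example_strdup(const char *str, gfp_t gfp)
{
	return kstrdup(str, gfp);
}
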
 
index 2f14540..7ae502f 100644 (file)
@@ -205,14 +205,14 @@ struct cfs_crypto_hash_desc *
        int                  err;
        const struct cfs_crypto_hash_type       *type;
 
-       hdesc = cfs_alloc(sizeof(*hdesc), 0);
+       hdesc = kmalloc(sizeof(*hdesc), 0);
        if (hdesc == NULL)
                return ERR_PTR(-ENOMEM);
 
        err = cfs_crypto_hash_alloc(alg_id, &type, hdesc, key, key_len);
 
        if (err) {
-               cfs_free(hdesc);
+               kfree(hdesc);
                return ERR_PTR(err);
        }
        return (struct cfs_crypto_hash_desc *)hdesc;
@@ -220,7 +220,7 @@ struct cfs_crypto_hash_desc *
 EXPORT_SYMBOL(cfs_crypto_hash_init);
 
 int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc,
-                               cfs_page_t *page, unsigned int offset,
+                               struct page *page, unsigned int offset,
                                unsigned int len)
 {
        struct scatterlist sl;
@@ -252,7 +252,7 @@ int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
 
        if (hash_len == NULL) {
                crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
-               cfs_free(hdesc);
+               kfree(hdesc);
                return 0;
        }
        if (hash == NULL || *hash_len < size) {
@@ -266,7 +266,7 @@ int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
                return err;
        }
        crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
-       cfs_free(hdesc);
+       kfree(hdesc);
        return err;
 }
 EXPORT_SYMBOL(cfs_crypto_hash_final);
@@ -326,7 +326,7 @@ static int cfs_crypto_test_hashes(void)
         * kmalloc size for 2.6.18 kernel is 128K */
        unsigned int        data_len = 1 * 128 * 1024;
 
-       data = cfs_alloc(data_len, 0);
+       data = kmalloc(data_len, 0);
        if (data == NULL) {
                CERROR("Failed to allocate mem\n");
                return -ENOMEM;
@@ -338,7 +338,7 @@ static int cfs_crypto_test_hashes(void)
        for (i = 0; i < CFS_HASH_ALG_MAX; i++)
                cfs_crypto_performance_test(i, data, data_len);
 
-       cfs_free(data);
+       kfree(data);
        return 0;
 }
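
cfs_crypto_hash_update_page() now takes a bare struct page *. The function
body is not shown in this hunk; as a hedged sketch, a page is typically fed to
the crypto layer of this kernel era through a one-entry scatterlist
(hypothetical example_hash_page helper):

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int example_hash_page(struct hash_desc *hdesc, struct page *page,
			     unsigned int offset, unsigned int len)
{
	struct scatterlist sl;

	sg_init_table(&sl, 1);
	sg_set_page(&sl, page, len, offset);	/* one page-backed segment */
	return crypto_hash_update(hdesc, &sl, len);
}
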
 
index 905befa..dd5d6ce 100644 (file)
@@ -275,19 +275,19 @@ int cfs_get_environ(const char *key, char *value, int *val_len)
 {
        struct mm_struct *mm;
        char *buffer, *tmp_buf = NULL;
-       int buf_len = CFS_PAGE_SIZE;
+       int buf_len = PAGE_CACHE_SIZE;
        int key_len = strlen(key);
        unsigned long addr;
        int rc;
        ENTRY;
 
-       buffer = cfs_alloc(buf_len, CFS_ALLOC_USER);
+       buffer = kmalloc(buf_len, GFP_USER);
        if (!buffer)
                RETURN(-ENOMEM);
 
        mm = get_task_mm(current);
        if (!mm) {
-               cfs_free(buffer);
+               kfree(buffer);
                RETURN(-EINVAL);
        }
 
@@ -363,9 +363,9 @@ int cfs_get_environ(const char *key, char *value, int *val_len)
 
 out:
        mmput(mm);
-       cfs_free((void *)buffer);
+       kfree((void *)buffer);
        if (tmp_buf)
-               cfs_free((void *)tmp_buf);
+               kfree((void *)tmp_buf);
        return rc;
 }
 EXPORT_SYMBOL(cfs_get_environ);
index c174d54..cc18de2 100644 (file)
 #include <linux/highmem.h>
 #include <libcfs/libcfs.h>
 
-static unsigned int cfs_alloc_flags_to_gfp(u_int32_t flags)
-{
-       unsigned int mflags = 0;
-
-        if (flags & CFS_ALLOC_ATOMIC)
-                mflags |= __GFP_HIGH;
-        else
-                mflags |= __GFP_WAIT;
-        if (flags & CFS_ALLOC_NOWARN)
-                mflags |= __GFP_NOWARN;
-        if (flags & CFS_ALLOC_IO)
-                mflags |= __GFP_IO;
-        if (flags & CFS_ALLOC_FS)
-                mflags |= __GFP_FS;
-       if (flags & CFS_ALLOC_HIGHMEM)
-               mflags |= __GFP_HIGHMEM;
-        return mflags;
-}
-
-void *
-cfs_alloc(size_t nr_bytes, u_int32_t flags)
-{
-       void *ptr = NULL;
-
-       ptr = kmalloc(nr_bytes, cfs_alloc_flags_to_gfp(flags));
-       if (ptr != NULL && (flags & CFS_ALLOC_ZERO))
-               memset(ptr, 0, nr_bytes);
-       return ptr;
-}
-
-void
-cfs_free(void *addr)
-{
-       kfree(addr);
-}
-
-void *
-cfs_alloc_large(size_t nr_bytes)
-{
-       return vmalloc(nr_bytes);
-}
-
-void
-cfs_free_large(void *addr)
-{
-       vfree(addr);
-}
-
-cfs_page_t *cfs_alloc_page(unsigned int flags)
-{
-        /*
-         * XXX nikita: do NOT call portals_debug_msg() (CDEBUG/ENTRY/EXIT)
-         * from here: this will lead to infinite recursion.
-         */
-        return alloc_page(cfs_alloc_flags_to_gfp(flags));
-}
-
-void cfs_free_page(cfs_page_t *page)
-{
-        __free_page(page);
-}
-
-cfs_mem_cache_t *
-cfs_mem_cache_create (const char *name, size_t size, size_t offset,
-                      unsigned long flags)
-{
-#ifdef HAVE_KMEM_CACHE_CREATE_DTOR
-        return kmem_cache_create(name, size, offset, flags, NULL, NULL);
-#else
-        return kmem_cache_create(name, size, offset, flags, NULL);
-#endif
-}
-
-int
-cfs_mem_cache_destroy (cfs_mem_cache_t * cachep)
-{
-#ifdef HAVE_KMEM_CACHE_DESTROY_INT
-        return kmem_cache_destroy(cachep);
-#else
-        kmem_cache_destroy(cachep);
-        return 0;
-#endif
-}
-
-void *
-cfs_mem_cache_alloc(cfs_mem_cache_t *cachep, int flags)
-{
-        return kmem_cache_alloc(cachep, cfs_alloc_flags_to_gfp(flags));
-}
-
-void
-cfs_mem_cache_free(cfs_mem_cache_t *cachep, void *objp)
-{
-        return kmem_cache_free(cachep, objp);
-}
-
-/**
- * Returns true if \a addr is an address of an allocated object in a slab \a
- * kmem. Used in assertions. This check is optimistically imprecise, i.e., it
- * occasionally returns true for the incorrect addresses, but if it returns
- * false, then the addresses is guaranteed to be incorrect.
- */
-int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem)
-{
-#ifdef CONFIG_SLAB
-        struct page *page;
-
-        /*
-         * XXX Copy of mm/slab.c:virt_to_cache(). It won't work with other
-         * allocators, like slub and slob.
-         */
-        page = virt_to_page(addr);
-        if (unlikely(PageCompound(page)))
-                page = (struct page *)page->private;
-        return PageSlab(page) && ((void *)page->lru.next) == kmem;
-#else
-        return 1;
-#endif
-}
-EXPORT_SYMBOL(cfs_mem_is_in_cache);
-
-
-EXPORT_SYMBOL(cfs_alloc);
-EXPORT_SYMBOL(cfs_free);
-EXPORT_SYMBOL(cfs_alloc_large);
-EXPORT_SYMBOL(cfs_free_large);
-EXPORT_SYMBOL(cfs_alloc_page);
-EXPORT_SYMBOL(cfs_free_page);
-EXPORT_SYMBOL(cfs_mem_cache_create);
-EXPORT_SYMBOL(cfs_mem_cache_destroy);
-EXPORT_SYMBOL(cfs_mem_cache_alloc);
-EXPORT_SYMBOL(cfs_mem_cache_free);
-
-/*
- * NB: we will rename some of above functions in another patch:
- * - rename cfs_alloc to cfs_malloc
- * - rename cfs_alloc/free_page to cfs_page_alloc/free
- * - rename cfs_alloc/free_large to cfs_vmalloc/vfree
- */
-
 void *
 cfs_cpt_malloc(struct cfs_cpt_table *cptab, int cpt,
               size_t nr_bytes, unsigned int flags)
 {
        void    *ptr;
 
-       ptr = kmalloc_node(nr_bytes, cfs_alloc_flags_to_gfp(flags),
+       ptr = kmalloc_node(nr_bytes, flags,
                           cfs_cpt_spread_node(cptab, cpt));
-       if (ptr != NULL && (flags & CFS_ALLOC_ZERO) != 0)
+       if (ptr != NULL && (flags & __GFP_ZERO) != 0)
                memset(ptr, 0, nr_bytes);
 
        return ptr;
@@ -203,19 +63,18 @@ cfs_cpt_vmalloc(struct cfs_cpt_table *cptab, int cpt, size_t nr_bytes)
 }
 EXPORT_SYMBOL(cfs_cpt_vmalloc);
 
-cfs_page_t *
+struct page *
 cfs_page_cpt_alloc(struct cfs_cpt_table *cptab, int cpt, unsigned int flags)
 {
-       return alloc_pages_node(cfs_cpt_spread_node(cptab, cpt),
-                               cfs_alloc_flags_to_gfp(flags), 0);
+       return alloc_pages_node(cfs_cpt_spread_node(cptab, cpt), flags, 0);
 }
 EXPORT_SYMBOL(cfs_page_cpt_alloc);
 
 void *
-cfs_mem_cache_cpt_alloc(cfs_mem_cache_t *cachep, struct cfs_cpt_table *cptab,
+cfs_mem_cache_cpt_alloc(struct kmem_cache *cachep, struct cfs_cpt_table *cptab,
                        int cpt, unsigned int flags)
 {
-       return kmem_cache_alloc_node(cachep, cfs_alloc_flags_to_gfp(flags),
+       return kmem_cache_alloc_node(cachep, flags,
                                     cfs_cpt_spread_node(cptab, cpt));
 }
 EXPORT_SYMBOL(cfs_mem_cache_cpt_alloc);
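
With cfs_alloc_flags_to_gfp() gone, allocation sites pass GFP flags straight
to the kernel allocators, following the mapping implied by the removed helper
above: CFS_ALLOC_ATOMIC behaves like GFP_ATOMIC, CFS_ALLOC_IO/CFS_ALLOC_FS
like __GFP_IO/__GFP_FS, and zeroing moves from an explicit memset() into
__GFP_ZERO. A hedged before/after sketch of a typical call site (hypothetical
example_alloc helper, not from the patch):

#include <linux/slab.h>
#include <linux/types.h>

/* old:  buf = cfs_alloc(nob, CFS_ALLOC_IO | CFS_ALLOC_ZERO);
 * new:  pass kernel flags directly; __GFP_ZERO replaces the memset()
 *       that cfs_alloc() used to perform */
static void *example_alloc(size_t nob, bool atomic)
{
	if (atomic)
		return kmalloc(nob, GFP_ATOMIC);
	return kmalloc(nob, GFP_NOFS | __GFP_ZERO);
}
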
index e7bd69e..d9849b9 100644 (file)
@@ -178,12 +178,12 @@ libcfs_ipif_enumerate (char ***namesp)
         nalloc = 16;        /* first guess at max interfaces */
         toobig = 0;
         for (;;) {
-                if (nalloc * sizeof(*ifr) > CFS_PAGE_SIZE) {
-                        toobig = 1;
-                        nalloc = CFS_PAGE_SIZE/sizeof(*ifr);
-                        CWARN("Too many interfaces: only enumerating first %d\n",
-                              nalloc);
-                }
+               if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) {
+                       toobig = 1;
+                       nalloc = PAGE_CACHE_SIZE/sizeof(*ifr);
+                       CWARN("Too many interfaces: only enumerating first %d\n",
+                             nalloc);
+               }
 
                 LIBCFS_ALLOC(ifr, nalloc * sizeof(*ifr));
                 if (ifr == NULL) {
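
These interface-enumeration loops cap the ifreq array at one page. A small
userspace sketch of that arithmetic (assuming 4 KiB pages; the exact count
depends on sizeof(struct ifreq) on the host):

#include <net/if.h>
#include <stdio.h>

int main(void)
{
	const size_t page_size = 4096;	/* assumption: PAGE_CACHE_SIZE */

	printf("sizeof(struct ifreq) = %zu\n", sizeof(struct ifreq));
	printf("interfaces per page  = %zu\n", page_size / sizeof(struct ifreq));
	return 0;	/* 4096 / 40 = 102 interfaces on a typical LP64 Linux */
}
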
index da33e01..a0168dc 100644 (file)
@@ -269,7 +269,7 @@ void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
 
 int cfs_trace_max_debug_mb(void)
 {
-       int  total_mb = (cfs_num_physpages >> (20 - PAGE_SHIFT));
+       int  total_mb = (num_physpages >> (20 - PAGE_SHIFT));
 
        return MAX(512, (total_mb * 80)/100);
 }
index ea5d076..0666b4c 100644 (file)
@@ -72,19 +72,19 @@ lwt_lookup_string (int *size, char *knl_ptr,
 
         *size = strnlen (knl_ptr, maxsize - 1) + 1;
 
-        if (user_ptr != NULL) {
-                if (user_size < 4)
-                        return (-EINVAL);
+       if (user_ptr != NULL) {
+               if (user_size < 4)
+                       return -EINVAL;
 
-                if (cfs_copy_to_user (user_ptr, knl_ptr, *size))
-                        return (-EFAULT);
+               if (copy_to_user(user_ptr, knl_ptr, *size))
+                       return -EFAULT;
 
-                /* Did I truncate the string?  */
-                if (knl_ptr[*size - 1] != 0)
-                        cfs_copy_to_user (user_ptr + *size - 4, "...", 4);
-        }
+               /* Did I truncate the string?  */
+               if (knl_ptr[*size - 1] != 0)
+                       copy_to_user(user_ptr + *size - 4, "...", 4);
+       }
 
-        return (0);
+       return 0;
 }
 
 int
@@ -115,7 +115,7 @@ lwt_control (int enable, int clear)
                         continue;
 
                 for (j = 0; j < lwt_pages_per_cpu; j++) {
-                        memset (p->lwtp_events, 0, CFS_PAGE_SIZE);
+                       memset(p->lwtp_events, 0, PAGE_CACHE_SIZE);
 
                         p = cfs_list_entry (p->lwtp_list.next,
                                             lwt_page_t, lwtp_list);
@@ -132,14 +132,14 @@ lwt_control (int enable, int clear)
 }
 
 int
-lwt_snapshot (cfs_cycles_t *now, int *ncpu, int *total_size,
-              void *user_ptr, int user_size)
+lwt_snapshot(cfs_cycles_t *now, int *ncpu, int *total_size,
+            void *user_ptr, int user_size)
 {
-        const int    events_per_page = CFS_PAGE_SIZE / sizeof(lwt_event_t);
-        const int    bytes_per_page = events_per_page * sizeof(lwt_event_t);
-        lwt_page_t  *p;
-        int          i;
-        int          j;
+       const int    events_per_page = PAGE_CACHE_SIZE / sizeof(lwt_event_t);
+       const int    bytes_per_page = events_per_page * sizeof(lwt_event_t);
+       lwt_page_t   *p;
+       int          i;
+       int          j;
 
         if (!cfs_capable(CFS_CAP_SYS_ADMIN))
                 return (-EPERM);
@@ -156,12 +156,12 @@ lwt_snapshot (cfs_cycles_t *now, int *ncpu, int *total_size,
                 p = lwt_cpus[i].lwtc_current_page;
 
                 if (p == NULL)
-                        return (-ENODATA);
+                       return -ENODATA;
 
-                for (j = 0; j < lwt_pages_per_cpu; j++) {
-                        if (cfs_copy_to_user(user_ptr, p->lwtp_events,
-                                             bytes_per_page))
-                                return (-EFAULT);
+               for (j = 0; j < lwt_pages_per_cpu; j++) {
+                       if (copy_to_user(user_ptr, p->lwtp_events,
+                                        bytes_per_page))
+                               return -EFAULT;
 
                         user_ptr = ((char *)user_ptr) + bytes_per_page;
                         p = cfs_list_entry(p->lwtp_list.next,
@@ -186,12 +186,12 @@ lwt_init ()
 
        /* NULL pointers, zero scalars */
        memset (lwt_cpus, 0, sizeof (lwt_cpus));
-        lwt_pages_per_cpu =
-                LWT_MEMORY / (cfs_num_online_cpus() * CFS_PAGE_SIZE);
+       lwt_pages_per_cpu =
+               LWT_MEMORY / (cfs_num_online_cpus() * PAGE_CACHE_SIZE);
 
        for (i = 0; i < cfs_num_online_cpus(); i++)
                for (j = 0; j < lwt_pages_per_cpu; j++) {
-                       struct page *page = alloc_page (GFP_KERNEL);
+                       struct page *page = alloc_page(GFP_KERNEL);
                        lwt_page_t  *lwtp;
 
                        if (page == NULL) {
@@ -210,7 +210,7 @@ lwt_init ()
 
                         lwtp->lwtp_page = page;
                         lwtp->lwtp_events = page_address(page);
-                       memset (lwtp->lwtp_events, 0, CFS_PAGE_SIZE);
+                       memset(lwtp->lwtp_events, 0, PAGE_CACHE_SIZE);
 
                        if (j == 0) {
                                CFS_INIT_LIST_HEAD (&lwtp->lwtp_list);
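
The lwt paths now call copy_to_user() directly. Its contract, as relied on
above, is that it returns the number of bytes it could not copy, so zero means
success and anything else is reported as -EFAULT. A minimal kernel-side sketch
(hypothetical example_copy_out helper):

#include <linux/errno.h>
#include <linux/uaccess.h>

static int example_copy_out(void __user *uptr, const void *kbuf, size_t nob)
{
	if (copy_to_user(uptr, kbuf, nob) != 0)
		return -EFAULT;	/* partial or failed copy */
	return 0;
}
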
index 878be01..f3f8643 100644 (file)
 void
 kportal_memhog_free (struct libcfs_device_userstate *ldu)
 {
-        cfs_page_t **level0p = &ldu->ldu_memhog_root_page;
-        cfs_page_t **level1p;
-        cfs_page_t **level2p;
-        int           count1;
-        int           count2;
+       struct page **level0p = &ldu->ldu_memhog_root_page;
+       struct page **level1p;
+       struct page **level2p;
+       int           count1;
+       int           count2;
 
-        if (*level0p != NULL) {
+       if (*level0p != NULL) {
+               level1p = (struct page **)page_address(*level0p);
+               count1 = 0;
 
-                level1p = (cfs_page_t **)cfs_page_address(*level0p);
-                count1 = 0;
+               while (count1 < PAGE_CACHE_SIZE/sizeof(struct page *) &&
+                      *level1p != NULL) {
 
-                while (count1 < CFS_PAGE_SIZE/sizeof(cfs_page_t *) &&
-                       *level1p != NULL) {
+                       level2p = (struct page **)page_address(*level1p);
+                       count2 = 0;
 
-                        level2p = (cfs_page_t **)cfs_page_address(*level1p);
-                        count2 = 0;
+                       while (count2 < PAGE_CACHE_SIZE/sizeof(struct page *) &&
+                              *level2p != NULL) {
 
-                        while (count2 < CFS_PAGE_SIZE/sizeof(cfs_page_t *) &&
-                               *level2p != NULL) {
+                               __free_page(*level2p);
+                               ldu->ldu_memhog_pages--;
+                               level2p++;
+                               count2++;
+                       }
 
-                                cfs_free_page(*level2p);
-                                ldu->ldu_memhog_pages--;
-                                level2p++;
-                                count2++;
-                        }
-
-                        cfs_free_page(*level1p);
-                        ldu->ldu_memhog_pages--;
-                        level1p++;
-                        count1++;
-                }
+                       __free_page(*level1p);
+                       ldu->ldu_memhog_pages--;
+                       level1p++;
+                       count1++;
+               }
 
-                cfs_free_page(*level0p);
-                ldu->ldu_memhog_pages--;
+               __free_page(*level0p);
+               ldu->ldu_memhog_pages--;
 
-                *level0p = NULL;
-        }
+               *level0p = NULL;
+       }
 
-        LASSERT (ldu->ldu_memhog_pages == 0);
+       LASSERT(ldu->ldu_memhog_pages == 0);
 }
 
 int
 kportal_memhog_alloc (struct libcfs_device_userstate *ldu, int npages, int flags)
 {
-        cfs_page_t **level0p;
-        cfs_page_t **level1p;
-        cfs_page_t **level2p;
-        int           count1;
-        int           count2;
+       struct page **level0p;
+       struct page **level1p;
+       struct page **level2p;
+       int           count1;
+       int           count2;
 
-        LASSERT (ldu->ldu_memhog_pages == 0);
-        LASSERT (ldu->ldu_memhog_root_page == NULL);
+       LASSERT(ldu->ldu_memhog_pages == 0);
+       LASSERT(ldu->ldu_memhog_root_page == NULL);
 
-        if (npages < 0)
-                return -EINVAL;
+       if (npages < 0)
+               return -EINVAL;
 
-        if (npages == 0)
-                return 0;
+       if (npages == 0)
+               return 0;
 
-        level0p = &ldu->ldu_memhog_root_page;
-        *level0p = cfs_alloc_page(flags);
-        if (*level0p == NULL)
-                return -ENOMEM;
-        ldu->ldu_memhog_pages++;
+       level0p = &ldu->ldu_memhog_root_page;
+       *level0p = alloc_page(flags);
+       if (*level0p == NULL)
+               return -ENOMEM;
+       ldu->ldu_memhog_pages++;
 
-        level1p = (cfs_page_t **)cfs_page_address(*level0p);
-        count1 = 0;
-        memset(level1p, 0, CFS_PAGE_SIZE);
+       level1p = (struct page **)page_address(*level0p);
+       count1 = 0;
+       memset(level1p, 0, PAGE_CACHE_SIZE);
 
-        while (ldu->ldu_memhog_pages < npages &&
-               count1 < CFS_PAGE_SIZE/sizeof(cfs_page_t *)) {
+       while (ldu->ldu_memhog_pages < npages &&
+              count1 < PAGE_CACHE_SIZE/sizeof(struct page *)) {
 
-                if (cfs_signal_pending())
-                        return (-EINTR);
+               if (cfs_signal_pending())
+                       return -EINTR;
 
-                *level1p = cfs_alloc_page(flags);
-                if (*level1p == NULL)
-                        return -ENOMEM;
-                ldu->ldu_memhog_pages++;
+               *level1p = alloc_page(flags);
+               if (*level1p == NULL)
+                       return -ENOMEM;
+               ldu->ldu_memhog_pages++;
 
-                level2p = (cfs_page_t **)cfs_page_address(*level1p);
-                count2 = 0;
-                memset(level2p, 0, CFS_PAGE_SIZE);
+               level2p = (struct page **)page_address(*level1p);
+               count2 = 0;
+               memset(level2p, 0, PAGE_CACHE_SIZE);
 
-                while (ldu->ldu_memhog_pages < npages &&
-                       count2 < CFS_PAGE_SIZE/sizeof(cfs_page_t *)) {
+               while (ldu->ldu_memhog_pages < npages &&
+                      count2 < PAGE_CACHE_SIZE/sizeof(struct page *)) {
 
-                        if (cfs_signal_pending())
-                                return (-EINTR);
+                       if (cfs_signal_pending())
+                               return -EINTR;
 
-                        *level2p = cfs_alloc_page(flags);
-                        if (*level2p == NULL)
-                                return (-ENOMEM);
-                        ldu->ldu_memhog_pages++;
+                       *level2p = alloc_page(flags);
+                       if (*level2p == NULL)
+                               return -ENOMEM;
+                       ldu->ldu_memhog_pages++;
 
-                        level2p++;
-                        count2++;
-                }
+                       level2p++;
+                       count2++;
+               }
 
-                level1p++;
-                count1++;
-        }
+               level1p++;
+               count1++;
+       }
 
-        return 0;
+       return 0;
 }
 
 /* called when opening /dev/device */
@@ -326,16 +325,17 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile,unsigned long cmd,
         RETURN(err);
 }
 
-static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd, void *arg)
+static int libcfs_ioctl(struct cfs_psdev_file *pfile,
+                       unsigned long cmd, void *arg)
 {
-        char    *buf;
-        struct libcfs_ioctl_data *data;
-        int err = 0;
-        ENTRY;
-
-        LIBCFS_ALLOC_GFP(buf, 1024, CFS_ALLOC_STD);
-        if (buf == NULL)
-                RETURN(-ENOMEM);
+       char    *buf;
+       struct libcfs_ioctl_data *data;
+       int err = 0;
+       ENTRY;
+
+       LIBCFS_ALLOC_GFP(buf, 1024, GFP_IOFS);
+       if (buf == NULL)
+               RETURN(-ENOMEM);
 
         /* 'cmd' and permissions get checked in our arch-specific caller */
         if (libcfs_ioctl_getdata(buf, buf + 800, (void *)arg)) {
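
kportal_memhog_alloc() above builds a three-level tree of pages: a root page of
pointers to level-1 pages, each of which holds pointers to level-2 data pages.
A standalone sketch of the resulting capacity, assuming 4 KiB pages and 8-byte
pointers:

#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096;	/* assumption: PAGE_CACHE_SIZE */
	const unsigned long ptrs = page_size / sizeof(void *);	/* 512 on LP64 */

	/* root page + level-1 index pages + level-2 data pages */
	unsigned long max_pages = 1 + ptrs + ptrs * ptrs;

	printf("max pages pinned = %lu (about %lu MB)\n",
	       max_pages, max_pages * page_size >> 20);
	return 0;	/* 262657 pages, about 1026 MB */
}
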
index d4de5b9..a24e3e8 100644 (file)
@@ -171,13 +171,13 @@ libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
                    const char *format1, va_list args,
                    const char *format2, ...)
 {
-        struct timeval tv;
-        int            nob;
-        int            remain;
-        va_list        ap;
-        char           buf[CFS_PAGE_SIZE]; /* size 4096 used for compatimble
-                                            * with linux, where message can`t
-                                            * be exceed PAGE_SIZE */
+       struct timeval tv;
+       int            nob;
+       int            remain;
+       va_list        ap;
+       char           buf[PAGE_CACHE_SIZE]; /* size 4096 for compatibility
+                                             * with Linux, where a message
+                                             * cannot exceed PAGE_SIZE */
         int            console = 0;
         char *prefix = "Lustre";
 
index 2049908..3a96dae 100644 (file)
@@ -68,41 +68,41 @@ cfs_tage_from_list(cfs_list_t *list)
 
 static struct cfs_trace_page *cfs_tage_alloc(int gfp)
 {
-        cfs_page_t            *page;
-        struct cfs_trace_page *tage;
-
-        /* My caller is trying to free memory */
-        if (!cfs_in_interrupt() && cfs_memory_pressure_get())
-                return NULL;
-
-        /*
-         * Don't spam console with allocation failures: they will be reported
-         * by upper layer anyway.
-         */
-        gfp |= CFS_ALLOC_NOWARN;
-        page = cfs_alloc_page(gfp);
-        if (page == NULL)
-                return NULL;
+       struct page            *page;
+       struct cfs_trace_page *tage;
 
-        tage = cfs_alloc(sizeof(*tage), gfp);
-        if (tage == NULL) {
-                cfs_free_page(page);
-                return NULL;
-        }
+       /* My caller is trying to free memory */
+       if (!cfs_in_interrupt() && memory_pressure_get())
+               return NULL;
+
+       /*
+        * Don't spam console with allocation failures: they will be reported
+        * by upper layer anyway.
+        */
+       gfp |= __GFP_NOWARN;
+       page = alloc_page(gfp);
+       if (page == NULL)
+               return NULL;
+
+       tage = kmalloc(sizeof(*tage), gfp);
+       if (tage == NULL) {
+               __free_page(page);
+               return NULL;
+       }
 
-        tage->page = page;
-        cfs_atomic_inc(&cfs_tage_allocated);
-        return tage;
+       tage->page = page;
+       cfs_atomic_inc(&cfs_tage_allocated);
+       return tage;
 }
 
 static void cfs_tage_free(struct cfs_trace_page *tage)
 {
-        __LASSERT(tage != NULL);
-        __LASSERT(tage->page != NULL);
+       __LASSERT(tage != NULL);
+       __LASSERT(tage->page != NULL);
 
-        cfs_free_page(tage->page);
-        cfs_free(tage);
-        cfs_atomic_dec(&cfs_tage_allocated);
+       __free_page(tage->page);
+       kfree(tage);
+       cfs_atomic_dec(&cfs_tage_allocated);
 }
 
 static void cfs_tage_to_tail(struct cfs_trace_page *tage,
@@ -144,7 +144,7 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
         if (tcd->tcd_cur_pages > 0) {
                 __LASSERT(!cfs_list_empty(&tcd->tcd_pages));
                 tage = cfs_tage_from_list(tcd->tcd_pages.prev);
-                if (tage->used + len <= CFS_PAGE_SIZE)
+               if (tage->used + len <= PAGE_CACHE_SIZE)
                         return tage;
         }
 
@@ -154,9 +154,9 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
                        --tcd->tcd_cur_stock_pages;
                        cfs_list_del_init(&tage->linkage);
                } else {
-                       tage = cfs_tage_alloc(CFS_ALLOC_ATOMIC);
+                       tage = cfs_tage_alloc(GFP_ATOMIC);
                        if (unlikely(tage == NULL)) {
-                               if ((!cfs_memory_pressure_get() ||
+                               if ((!memory_pressure_get() ||
                                     cfs_in_interrupt()) && printk_ratelimit())
                                        printk(CFS_KERN_WARNING
                                               "cannot allocate a tage (%ld)\n",
@@ -225,7 +225,7 @@ static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
          * from here: this will lead to infinite recursion.
          */
 
-        if (len > CFS_PAGE_SIZE) {
+       if (len > PAGE_CACHE_SIZE) {
                 printk(CFS_KERN_ERR
                        "cowardly refusing to write %lu bytes in a page\n", len);
                 return NULL;
@@ -317,7 +317,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
         for (i = 0; i < 2; i++) {
                 tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
                 if (tage == NULL) {
-                        if (needed + known_size > CFS_PAGE_SIZE)
+                       if (needed + known_size > PAGE_CACHE_SIZE)
                                 mask |= D_ERROR;
 
                         cfs_trace_put_tcd(tcd);
@@ -325,10 +325,10 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
                         goto console;
                 }
 
-                string_buf = (char *)cfs_page_address(tage->page) +
+               string_buf = (char *)page_address(tage->page) +
                                         tage->used + known_size;
 
-                max_nob = CFS_PAGE_SIZE - tage->used - known_size;
+               max_nob = PAGE_CACHE_SIZE - tage->used - known_size;
                 if (max_nob <= 0) {
                         printk(CFS_KERN_EMERG "negative max_nob: %d\n",
                                max_nob);
@@ -365,7 +365,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
                        "newline\n", file, msgdata->msg_line, msgdata->msg_fn);
 
         header.ph_len = known_size + needed;
-        debug_buf = (char *)cfs_page_address(tage->page) + tage->used;
+       debug_buf = (char *)page_address(tage->page) + tage->used;
 
         if (libcfs_debug_binary) {
                 memcpy(debug_buf, &header, sizeof(header));
@@ -392,7 +392,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
         __LASSERT(debug_buf == string_buf);
 
         tage->used += needed;
-        __LASSERT (tage->used <= CFS_PAGE_SIZE);
+       __LASSERT(tage->used <= PAGE_CACHE_SIZE);
 
 console:
         if ((mask & libcfs_printk) == 0) {
@@ -652,14 +652,14 @@ void cfs_trace_debug_print(void)
         collect_pages(&pc);
         cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
                                            struct cfs_trace_page, linkage) {
-                char *p, *file, *fn;
-                cfs_page_t *page;
+               char *p, *file, *fn;
+               struct page *page;
 
-                __LASSERT_TAGE_INVARIANT(tage);
+               __LASSERT_TAGE_INVARIANT(tage);
 
-                page = tage->page;
-                p = cfs_page_address(page);
-                while (p < ((char *)cfs_page_address(page) + tage->used)) {
+               page = tage->page;
+               p = page_address(page);
+               while (p < ((char *)page_address(page) + tage->used)) {
                         struct ptldebug_header *hdr;
                         int len;
                         hdr = (void *)p;
@@ -688,7 +688,7 @@ int cfs_tracefile_dump_all_pages(char *filename)
        struct cfs_trace_page   *tmp;
        int rc;
 
-       CFS_DECL_MMSPACE;
+       DECL_MMSPACE;
 
        cfs_tracefile_write_lock();
 
@@ -711,13 +711,13 @@ int cfs_tracefile_dump_all_pages(char *filename)
 
         /* ok, for now, just write the pages.  in the future we'll be building
          * iobufs with the pages and calling generic_direct_IO */
-        CFS_MMSPACE_OPEN;
+       MMSPACE_OPEN;
         cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
                                            struct cfs_trace_page, linkage) {
 
                 __LASSERT_TAGE_INVARIANT(tage);
 
-               rc = filp_write(filp, cfs_page_address(tage->page),
+               rc = filp_write(filp, page_address(tage->page),
                                tage->used, filp_poff(filp));
                 if (rc != (int)tage->used) {
                         printk(CFS_KERN_WARNING "wanted to write %u but wrote "
@@ -729,7 +729,7 @@ int cfs_tracefile_dump_all_pages(char *filename)
                 cfs_list_del(&tage->linkage);
                 cfs_tage_free(tage);
         }
-       CFS_MMSPACE_CLOSE;
+       MMSPACE_CLOSE;
        rc = filp_fsync(filp);
        if (rc)
                printk(CFS_KERN_ERR "sync returns %d\n", rc);
@@ -768,7 +768,7 @@ int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
         if (usr_buffer_nob > knl_buffer_nob)
                 return -EOVERFLOW;
 
-        if (cfs_copy_from_user((void *)knl_buffer,
+       if (copy_from_user((void *)knl_buffer,
                            (void *)usr_buffer, usr_buffer_nob))
                 return -EFAULT;
 
@@ -799,11 +799,11 @@ int cfs_trace_copyout_string(char *usr_buffer, int usr_buffer_nob,
         if (nob > usr_buffer_nob)
                 nob = usr_buffer_nob;
 
-        if (cfs_copy_to_user(usr_buffer, knl_buffer, nob))
+       if (copy_to_user(usr_buffer, knl_buffer, nob))
                 return -EFAULT;
 
         if (append != NULL && nob < usr_buffer_nob) {
-                if (cfs_copy_to_user(usr_buffer + nob, append, 1))
+               if (copy_to_user(usr_buffer + nob, append, 1))
                         return -EFAULT;
 
                 nob++;
@@ -815,10 +815,10 @@ EXPORT_SYMBOL(cfs_trace_copyout_string);
 
 int cfs_trace_allocate_string_buffer(char **str, int nob)
 {
-        if (nob > 2 * CFS_PAGE_SIZE)            /* string must be "sensible" */
+       if (nob > 2 * PAGE_CACHE_SIZE)  /* string must be "sensible" */
                 return -EINVAL;
 
-        *str = cfs_alloc(nob, CFS_ALLOC_STD | CFS_ALLOC_ZERO);
+       *str = kmalloc(nob, GFP_IOFS | __GFP_ZERO);
         if (*str == NULL)
                 return -ENOMEM;
 
@@ -827,7 +827,7 @@ int cfs_trace_allocate_string_buffer(char **str, int nob)
 
 void cfs_trace_free_string_buffer(char *str, int nob)
 {
-        cfs_free(str);
+       kfree(str);
 }
 
 int cfs_trace_dump_debug_buffer_usrstr(void *usr_str, int usr_str_nob)
@@ -937,7 +937,7 @@ int cfs_trace_set_debug_mb(int mb)
         }
 
         mb /= cfs_num_possible_cpus();
-        pages = mb << (20 - CFS_PAGE_SHIFT);
+       pages = mb << (20 - PAGE_CACHE_SHIFT);
 
         cfs_tracefile_write_lock();
 
@@ -975,7 +975,7 @@ int cfs_trace_get_debug_mb(void)
 
         cfs_tracefile_read_unlock();
 
-        return (total_pages >> (20 - CFS_PAGE_SHIFT)) + 1;
+       return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1;
 }
 
 static int tracefiled(void *arg)
@@ -988,7 +988,7 @@ static int tracefiled(void *arg)
        int last_loop = 0;
        int rc;
 
-       CFS_DECL_MMSPACE;
+       DECL_MMSPACE;
 
        /* we're started late enough that we pick up init's fs context */
        /* this is so broken in uml?  what on earth is going on? */
@@ -1024,7 +1024,7 @@ static int tracefiled(void *arg)
                         goto end_loop;
                 }
 
-                CFS_MMSPACE_OPEN;
+               MMSPACE_OPEN;
 
                 cfs_list_for_each_entry_safe_typed(tage, tmp, &pc.pc_pages,
                                                    struct cfs_trace_page,
@@ -1038,7 +1038,7 @@ static int tracefiled(void *arg)
                        else if (f_pos > (off_t)filp_size(filp))
                                f_pos = filp_size(filp);
 
-                       rc = filp_write(filp, cfs_page_address(tage->page),
+                       rc = filp_write(filp, page_address(tage->page),
                                        tage->used, &f_pos);
                         if (rc != (int)tage->used) {
                                 printk(CFS_KERN_WARNING "wanted to write %u "
@@ -1047,7 +1047,7 @@ static int tracefiled(void *arg)
                                 __LASSERT(cfs_list_empty(&pc.pc_pages));
                         }
                 }
-               CFS_MMSPACE_CLOSE;
+               MMSPACE_CLOSE;
 
                filp_close(filp, NULL);
                 put_pages_on_daemon_list(&pc);
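
DECL_MMSPACE, MMSPACE_OPEN and MMSPACE_CLOSE bracket the filp_write() calls so
that kernel buffers such as page_address(tage->page) pass the user-pointer
checks. A hedged sketch of the assumed Linux expansion, the get_fs()/set_fs()
idiom of this kernel era (hypothetical example_write_kernel_buf helper):

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t example_write_kernel_buf(struct file *filp, void *kbuf,
					size_t nob, loff_t *pos)
{
	mm_segment_t oldfs = get_fs();	/* DECL_MMSPACE (assumed expansion) */
	ssize_t rc;

	set_fs(KERNEL_DS);		/* MMSPACE_OPEN: accept kernel pointers */
	rc = vfs_write(filp, (const char __user *)kbuf, nob, pos);
	set_fs(oldfs);			/* MMSPACE_CLOSE: restore the old limit */
	return rc;
}
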
index 968c128..196ab96 100644 (file)
@@ -92,7 +92,7 @@ extern void libcfs_unregister_panic_notifier(void);
 extern int  libcfs_panic_in_progress;
 extern int  cfs_trace_max_debug_mb(void);
 
-#define TCD_MAX_PAGES (5 << (20 - CFS_PAGE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
 #define TCD_STOCK_PAGES (TCD_MAX_PAGES)
 #define CFS_TRACEFILE_SIZE (500 << 20)
 
@@ -101,7 +101,7 @@ extern int  cfs_trace_max_debug_mb(void);
 /*
  * Private declare for tracefile
  */
-#define TCD_MAX_PAGES (5 << (20 - CFS_PAGE_SHIFT))
+#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
 #define TCD_STOCK_PAGES (TCD_MAX_PAGES)
 
 #define CFS_TRACEFILE_SIZE (500 << 20)
@@ -239,7 +239,7 @@ struct cfs_trace_page {
        /*
         * page itself
         */
-       cfs_page_t          *page;
+       struct page          *page;
        /*
         * linkage into one of the lists in trace_data_union or
         * page_collection
@@ -337,8 +337,8 @@ do {                                                                    \
 do {                                                                    \
         __LASSERT(tage != NULL);                                        \
         __LASSERT(tage->page != NULL);                                  \
-        __LASSERT(tage->used <= CFS_PAGE_SIZE);                         \
-        __LASSERT(cfs_page_count(tage->page) > 0);                      \
+       __LASSERT(tage->used <= PAGE_CACHE_SIZE);                       \
+       __LASSERT(page_count(tage->page) > 0);                          \
 } while (0)
 
 #endif /* LUSTRE_TRACEFILE_PRIVATE */
index 224ca77..5ad136a 100644 (file)
@@ -214,7 +214,7 @@ struct cfs_crypto_hash_desc
                return ERR_PTR(-ENODEV);
        }
 
-       hdesc = cfs_alloc(sizeof(*hdesc) + ha->ha_ctx_size, 0);
+       hdesc = kmalloc(sizeof(*hdesc) + ha->ha_ctx_size, 0);
        if (hdesc == NULL)
                return ERR_PTR(-ENOMEM);
 
@@ -225,7 +225,7 @@ struct cfs_crypto_hash_desc
                if (err == 0) {
                        return (struct cfs_crypto_hash_desc *) hdesc;
                } else {
-                       cfs_free(hdesc);
+                       kfree(hdesc);
                        return ERR_PTR(err);
                }
        }
@@ -241,7 +241,7 @@ int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *desc, const void *buf,
 }
 
 int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *desc,
-                               cfs_page_t *page, unsigned int offset,
+                               struct page *page, unsigned int offset,
                                unsigned int len)
 {
        const void *p = page->addr + offset;
@@ -262,7 +262,7 @@ int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *desc,
        int     err;
 
        if (hash_len == NULL) {
-               cfs_free(d);
+               kfree(d);
                return 0;
        }
        if (hash == NULL || *hash_len < size) {
@@ -274,7 +274,7 @@ int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *desc,
        err = d->hd_hash->final(d->hd_ctx, hash, *hash_len);
        if (err == 0) {
                  /* If get final digest success free hash descriptor */
-                 cfs_free(d);
+                 kfree(d);
        }
 
        return err;
@@ -370,7 +370,7 @@ static int cfs_crypto_test_hashes(void)
        unsigned char      *data;
        unsigned int        j, data_len = 1024 * 1024;
 
-       data = cfs_alloc(data_len, 0);
+       data = kmalloc(data_len, 0);
        if (data == NULL) {
                CERROR("Failed to allocate mem\n");
                return -ENOMEM;
@@ -381,7 +381,7 @@ static int cfs_crypto_test_hashes(void)
        for (i = 0; i < CFS_HASH_ALG_MAX; i++)
                cfs_crypto_performance_test(i, data, data_len);
 
-       cfs_free(data);
+       kfree(data);
        return 0;
 }
 
index 615fa00..af1d09c 100644 (file)
@@ -47,9 +47,9 @@
  * Allocator
  */
 
-cfs_page_t *cfs_alloc_page(unsigned int flags)
+struct page *alloc_page(unsigned int flags)
 {
-        cfs_page_t *pg = malloc(sizeof(*pg));
+       struct page *pg = malloc(sizeof(*pg));
         int rc = 0;
 
         if (!pg)
@@ -57,11 +57,11 @@ cfs_page_t *cfs_alloc_page(unsigned int flags)
         pg->addr = NULL;
 
 #if defined (__DARWIN__)
-        pg->addr = valloc(CFS_PAGE_SIZE);
+       pg->addr = valloc(PAGE_CACHE_SIZE);
 #elif defined (__WINNT__)
         pg->addr = pgalloc(0);
 #else
-        rc = posix_memalign(&pg->addr, CFS_PAGE_SIZE, CFS_PAGE_SIZE);
+       rc = posix_memalign(&pg->addr, PAGE_CACHE_SIZE, PAGE_CACHE_SIZE);
 #endif
         if (rc != 0 || pg->addr == NULL) {
                 free(pg);
@@ -70,7 +70,7 @@ cfs_page_t *cfs_alloc_page(unsigned int flags)
         return pg;
 }
 
-void cfs_free_page(cfs_page_t *pg)
+void __free_page(struct page *pg)
 {
 #if defined (__WINNT__)
         pgfree(pg->addr);
@@ -81,17 +81,17 @@ void cfs_free_page(cfs_page_t *pg)
         free(pg);
 }
 
-void *cfs_page_address(cfs_page_t *pg)
+void *page_address(struct page *pg)
 {
         return pg->addr;
 }
 
-void *cfs_kmap(cfs_page_t *pg)
+void *kmap(struct page *pg)
 {
         return pg->addr;
 }
 
-void cfs_kunmap(cfs_page_t *pg)
+void kunmap(struct page *pg)
 {
 }
 
@@ -99,10 +99,11 @@ void cfs_kunmap(cfs_page_t *pg)
  * SLAB allocator
  */
 
-cfs_mem_cache_t *
-cfs_mem_cache_create(const char *name, size_t objsize, size_t off, unsigned long flags)
+struct kmem_cache *
+kmem_cache_create(const char *name, size_t objsize, size_t off,
+                 unsigned long flags, void *ctor)
 {
-        cfs_mem_cache_t *c;
+       struct kmem_cache *c;
 
         c = malloc(sizeof(*c));
         if (!c)
@@ -113,21 +114,20 @@ cfs_mem_cache_create(const char *name, size_t objsize, size_t off, unsigned long
         return c;
 }
 
-int cfs_mem_cache_destroy(cfs_mem_cache_t *c)
+void kmem_cache_destroy(struct kmem_cache *c)
 {
         CDEBUG(D_MALLOC, "destroy slab cache %p, objsize %u\n", c, c->size);
         free(c);
-        return 0;
 }
 
-void *cfs_mem_cache_alloc(cfs_mem_cache_t *c, int gfp)
+void *kmem_cache_alloc(struct kmem_cache *c, int gfp)
 {
-        return cfs_alloc(c->size, gfp);
+       return kmalloc(c->size, gfp);
 }
 
-void cfs_mem_cache_free(cfs_mem_cache_t *c, void *addr)
+void kmem_cache_free(struct kmem_cache *c, void *addr)
 {
-        cfs_free(addr);
+       kfree(addr);
 }
 
 /**
@@ -136,7 +136,7 @@ void cfs_mem_cache_free(cfs_mem_cache_t *c, void *addr)
  * occasionally returns true for the incorrect addresses, but if it returns
  * false, then the addresses is guaranteed to be incorrect.
  */
-int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem)
+int kmem_is_in_cache(const void *addr, const struct kmem_cache *kmem)
 {
         return 1;
 }
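
The userspace alloc_page() emulation above relies on posix_memalign(). Its
contract: it returns 0 on success or an error number directly (it does not set
errno), and the buffer is released with free(). A standalone sketch, assuming
4 KiB pages:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	void *addr = NULL;
	const size_t page = 4096;	/* assumption: PAGE_CACHE_SIZE */
	int rc = posix_memalign(&addr, page, page);

	if (rc != 0) {
		fprintf(stderr, "posix_memalign failed: %d\n", rc);
		return 1;
	}
	printf("page-aligned buffer at %p\n", addr);
	free(addr);
	return 0;
}
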
index ba5328f..e59719e 100644 (file)
@@ -155,18 +155,12 @@ struct idr_context * cfs_win_task_slot_idp = NULL;
  *  task slot routiens
  */
 
-PTASK_SLOT
-alloc_task_slot()
+PTASK_SLOT alloc_task_slot()
 {
-    PTASK_SLOT task = NULL;
-
-    if (cfs_win_task_manger.slab) {
-        task = cfs_mem_cache_alloc(cfs_win_task_manger.slab, 0);
-    } else {
-        task = cfs_alloc(sizeof(TASK_SLOT), 0);
-    }
-
-    return task;
+       if (cfs_win_task_manger.slab)
+               return kmem_cache_alloc(cfs_win_task_manger.slab, 0);
+       else
+               return kmalloc(sizeof(TASK_SLOT), 0);
 }
 
 void
@@ -178,18 +172,15 @@ init_task_slot(PTASK_SLOT task)
     cfs_init_event(&task->Event, TRUE, FALSE);
 }
 
-void
-cleanup_task_slot(PTASK_SLOT task)
+void cleanup_task_slot(PTASK_SLOT task)
 {
-    if (task->task.pid) {
-        cfs_idr_remove(cfs_win_task_slot_idp, task->task.pid);
-    }
+       if (task->task.pid)
+               cfs_idr_remove(cfs_win_task_slot_idp, task->task.pid);
 
-    if (cfs_win_task_manger.slab) {
-        cfs_mem_cache_free(cfs_win_task_manger.slab, task);
-    } else {
-        cfs_free(task);
-    }
+       if (cfs_win_task_manger.slab)
+               kmem_cache_free(cfs_win_task_manger.slab, task);
+       else
+               kfree(task);
 }
 
 /*
@@ -243,9 +234,9 @@ init_task_manager()
     /* initialize the spinlock protection */
        spin_lock_init(&cfs_win_task_manger.Lock);
 
-    /* create slab memory cache */
-    cfs_win_task_manger.slab = cfs_mem_cache_create(
-        "TSLT", sizeof(TASK_SLOT), 0, 0);
+       /* create slab memory cache */
+       cfs_win_task_manger.slab = kmem_cache_create("TSLT", sizeof(TASK_SLOT),
+                                                    0, 0, NULL);
 
     /* intialize the list header */
     InitializeListHead(&(cfs_win_task_manger.TaskList));
@@ -300,9 +291,9 @@ cleanup_task_manager()
 
        spin_unlock(&cfs_win_task_manger.Lock);
 
-    /* destroy the taskslot cache slab */
-    cfs_mem_cache_destroy(cfs_win_task_manger.slab);
-    memset(&cfs_win_task_manger, 0, sizeof(TASK_MAN));
+       /* destroy the taskslot cache slab */
+       kmem_cache_destroy(cfs_win_task_manger.slab);
+       memset(&cfs_win_task_manger, 0, sizeof(TASK_MAN));
 }
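
The task-slot hunks above keep the existing fall-back pattern: allocate from the slab when it exists, otherwise from the general pool, and free through the matching path. A stripped-down sketch of that pattern (illustrative, not from the patch):

static void *slot_alloc(struct kmem_cache *slab, size_t size)
{
        /* slab-backed when available, plain kmalloc otherwise */
        return slab != NULL ? kmem_cache_alloc(slab, 0) : kmalloc(size, 0);
}

static void slot_free(struct kmem_cache *slab, void *obj)
{
        /* the free path must mirror the allocation path */
        if (slab != NULL)
                kmem_cache_free(slab, obj);
        else
                kfree(obj);
}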
 
 
index 9a5a2f6..b9b1027 100644 (file)
@@ -150,16 +150,16 @@ struct file *filp_open(const char *name, int flags, int mode, int *err)
                        return ERR_PTR(-EINVAL);
        }
 
-       AnsiString = cfs_alloc(sizeof(CHAR) * (NameLength + PrefixLength + 1),
-                               CFS_ALLOC_ZERO);
+       AnsiString = kmalloc(sizeof(CHAR) * (NameLength + PrefixLength + 1),
+                               __GFP_ZERO);
        if (NULL == AnsiString)
                return ERR_PTR(-ENOMEM);
 
        UnicodeString =
-               cfs_alloc(sizeof(WCHAR) * (NameLength + PrefixLength + 1),
-                         CFS_ALLOC_ZERO);
+               kmalloc(sizeof(WCHAR) * (NameLength + PrefixLength + 1),
+                         __GFP_ZERO);
        if (NULL == UnicodeString) {
-               cfs_free(AnsiString);
+               kfree(AnsiString);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -205,19 +205,19 @@ struct file *filp_open(const char *name, int flags, int mode, int *err)
 
        /* Check the returned status of IoStatus... */
        if (!NT_SUCCESS(IoStatus.Status)) {
-               cfs_free(UnicodeString);
-               cfs_free(AnsiString);
+               kfree(UnicodeString);
+               kfree(AnsiString);
                return ERR_PTR(cfs_error_code(IoStatus.Status));
        }
 
        /* Allocate the file_t: libcfs file object */
-       fp = cfs_alloc(sizeof(*fp) + NameLength, CFS_ALLOC_ZERO);
+       fp = kmalloc(sizeof(*fp) + NameLength, __GFP_ZERO);
 
        if (NULL == fp) {
                Status = ZwClose(FileHandle);
                ASSERT(NT_SUCCESS(Status));
-               cfs_free(UnicodeString);
-               cfs_free(AnsiString);
+               kfree(UnicodeString);
+               kfree(AnsiString);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -227,11 +227,11 @@ struct file *filp_open(const char *name, int flags, int mode, int *err)
     fp->f_mode  = (mode_t)mode;
     fp->f_count = 1;
 
-    /* free the memory of temporary name strings */
-    cfs_free(UnicodeString);
-    cfs_free(AnsiString);
+       /* free the memory of temporary name strings */
+       kfree(UnicodeString);
+       kfree(AnsiString);
 
-    return fp;
+       return fp;
 }
 
 
@@ -261,9 +261,9 @@ int filp_close(file_t *fp, void *id)
     Status = ZwClose(fp->f_handle);
     ASSERT(NT_SUCCESS(Status));
 
-    /* free the file flip structure */
-    cfs_free(fp);
-    return 0;
+       /* free the filp structure */
+       kfree(fp);
+       return 0;
 }
 
 
@@ -683,6 +683,6 @@ void dput(struct dentry *de)
         return;
     }
     if (cfs_atomic_dec_and_test(&de->d_count)) {
-        cfs_free(de);
+       kfree(de);
     }
 }
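
The conversions in this file follow one mechanical rule: cfs_alloc(size, CFS_ALLOC_ZERO) becomes kmalloc(size, __GFP_ZERO), and every early-error path frees with kfree(). A reduced sketch of that shape (hypothetical names, not from the patch):

static int alloc_name_buffers(size_t len, char **ansi, char **unicode)
{
        *ansi = kmalloc(len, __GFP_ZERO);
        if (*ansi == NULL)
                return -ENOMEM;

        *unicode = kmalloc(len * sizeof(short), __GFP_ZERO);
        if (*unicode == NULL) {
                kfree(*ansi);           /* unwind the first allocation */
                *ansi = NULL;
                return -ENOMEM;
        }
        return 0;
}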
index f6cfb1d..5dc7958 100644 (file)
 #include <libcfs/libcfs.h>
 
 
-cfs_mem_cache_t *cfs_page_t_slab = NULL;
-cfs_mem_cache_t *cfs_page_p_slab = NULL;
+struct kmem_cache *cfs_page_t_slab;
+struct kmem_cache *cfs_page_p_slab;
 
-cfs_page_t * virt_to_page(void * addr)
+struct page *virt_to_page(void *addr)
 {
-    cfs_page_t *pg;
-    pg = cfs_mem_cache_alloc(cfs_page_t_slab, 0);
-    
-    if (NULL == pg) {
-        cfs_enter_debugger();
-        return NULL;
-    }
+       struct page *pg;
+       pg = kmem_cache_alloc(cfs_page_t_slab, 0);
+
+       if (NULL == pg) {
+               cfs_enter_debugger();
+               return NULL;
+       }
 
-    memset(pg, 0, sizeof(cfs_page_t));
-    pg->addr = (void *)((__u64)addr & (~((__u64)PAGE_SIZE-1)));
-    pg->mapping = addr;
-    cfs_atomic_set(&pg->count, 1);
+       memset(pg, 0, sizeof(struct page));
+       pg->addr = (void *)((__u64)addr & (~((__u64)PAGE_SIZE-1)));
+       pg->mapping = addr;
+       cfs_atomic_set(&pg->count, 1);
        set_bit(PG_virt, &(pg->flags));
-    cfs_enter_debugger();
-    return pg;
+       cfs_enter_debugger();
+       return pg;
 }
 
 /*
- * cfs_alloc_page
- *   To allocate the cfs_page_t and also 1 page of memory
+ * alloc_page
+ *   To allocate the struct page and also 1 page of memory
  *
  * Arguments:
  *   flags:  the allocation options
  *
  * Return Value:
- *   pointer to the cfs_page_t strcture in success or
+ *   pointer to the struct page structure on success or
  *   NULL in failure case
  *
  * Notes: 
@@ -78,40 +78,39 @@ cfs_page_t * virt_to_page(void * addr)
 
 cfs_atomic_t libcfs_total_pages;
 
-cfs_page_t * cfs_alloc_page(int flags)
+struct page *alloc_page(int flags)
 {
-    cfs_page_t *pg;
-    pg = cfs_mem_cache_alloc(cfs_page_t_slab, 0);
-    
-    if (NULL == pg) {
-        cfs_enter_debugger();
-        return NULL;
-    }
+       struct page *pg;
+       pg = kmem_cache_alloc(cfs_page_t_slab, 0);
 
-    memset(pg, 0, sizeof(cfs_page_t));
-    pg->addr = cfs_mem_cache_alloc(cfs_page_p_slab, 0);
-    cfs_atomic_set(&pg->count, 1);
-
-    if (pg->addr) {
-        if (cfs_is_flag_set(flags, CFS_ALLOC_ZERO)) {
-            memset(pg->addr, 0, CFS_PAGE_SIZE);
-        }
-        cfs_atomic_inc(&libcfs_total_pages);
-    } else {
-        cfs_enter_debugger();
-        cfs_mem_cache_free(cfs_page_t_slab, pg);
-        pg = NULL;
-    }
+       if (NULL == pg) {
+               cfs_enter_debugger();
+               return NULL;
+       }
+
+       memset(pg, 0, sizeof(struct page));
+       pg->addr = kmem_cache_alloc(cfs_page_p_slab, 0);
+       cfs_atomic_set(&pg->count, 1);
+
+       if (pg->addr) {
+               if (cfs_is_flag_set(flags, __GFP_ZERO))
+                       memset(pg->addr, 0, PAGE_CACHE_SIZE);
+               cfs_atomic_inc(&libcfs_total_pages);
+       } else {
+               cfs_enter_debugger();
+               kmem_cache_free(cfs_page_t_slab, pg);
+               pg = NULL;
+       }
 
-    return pg;
+       return pg;
 }
 
 /*
- * cfs_free_page
- *   To free the cfs_page_t including the page
+ * __free_page
+ *   To free the struct page including the page
  *
  * Arguments:
- *   pg:  pointer to the cfs_page_t strcture
+ *   pg:  pointer to the struct page structure
  *
  * Return Value:
  *   N/A
@@ -119,30 +118,30 @@ cfs_page_t * cfs_alloc_page(int flags)
  * Notes: 
  *   N/A
  */
-void cfs_free_page(cfs_page_t *pg)
+void __free_page(struct page *pg)
 {
-    ASSERT(pg != NULL);
-    ASSERT(pg->addr  != NULL);
-    ASSERT(cfs_atomic_read(&pg->count) <= 1);
+       ASSERT(pg != NULL);
+       ASSERT(pg->addr  != NULL);
+       ASSERT(cfs_atomic_read(&pg->count) <= 1);
 
        if (!test_bit(PG_virt, &pg->flags)) {
-        cfs_mem_cache_free(cfs_page_p_slab, pg->addr);
-        cfs_atomic_dec(&libcfs_total_pages);
-    } else {
-        cfs_enter_debugger();
-    }
-    cfs_mem_cache_free(cfs_page_t_slab, pg);
+               kmem_cache_free(cfs_page_p_slab, pg->addr);
+               cfs_atomic_dec(&libcfs_total_pages);
+       } else {
+               cfs_enter_debugger();
+       }
+       kmem_cache_free(cfs_page_t_slab, pg);
 }
 
-int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem)
+int kmem_is_in_cache(const void *addr, const struct kmem_cache *kmem)
 {
-    KdPrint(("cfs_mem_is_in_cache: not implemented. (should maintain a"
-              "chain to keep all allocations traced.)\n"));
-    return 1;
+       KdPrint(("kmem_is_in_cache: not implemented. (should maintain a "
+                "chain to keep all allocations traced.)\n"));
+       return 1;
 }
 
 /*
- * cfs_alloc
+ * kmalloc
  *   To allocate memory from system pool
  *
  * Arguments:
@@ -158,25 +157,23 @@ int cfs_mem_is_in_cache(const void *addr, const cfs_mem_cache_t *kmem)
  */
 
 void *
-cfs_alloc(size_t nr_bytes, u_int32_t flags)
+kmalloc(size_t nr_bytes, u_int32_t flags)
 {
-    void *ptr;
+       void *ptr;
 
-    /* Ignore the flags: always allcoate from NonPagedPool */
-    ptr = ExAllocatePoolWithTag(NonPagedPool, nr_bytes, 'Lufs');
-    if (ptr != NULL && (flags & CFS_ALLOC_ZERO)) {
-        memset(ptr, 0, nr_bytes);
-    }
+       /* Ignore the flags: always allocate from NonPagedPool */
+       ptr = ExAllocatePoolWithTag(NonPagedPool, nr_bytes, 'Lufs');
+       if (ptr != NULL && (flags & __GFP_ZERO))
+               memset(ptr, 0, nr_bytes);
 
-    if (!ptr) {
-        cfs_enter_debugger();
-    }
+       if (!ptr)
+               cfs_enter_debugger();
 
-    return ptr;
+       return ptr;
 }
 
 /*
- * cfs_free
+ * kfree
  *   To free the sepcified memory to system pool
  *
  * Arguments:
@@ -190,13 +187,13 @@ cfs_alloc(size_t nr_bytes, u_int32_t flags)
  */
 
 void
-cfs_free(void *addr)
+kfree(void *addr)
 {
-    ExFreePool(addr);
+       ExFreePool(addr);
 }
 
 /*
- * cfs_alloc_large
+ * vmalloc
  *   To allocate large block of memory from system pool
  *
  * Arguments:
@@ -211,13 +208,13 @@ cfs_free(void *addr)
  */
 
 void *
-cfs_alloc_large(size_t nr_bytes)
+vmalloc(size_t nr_bytes)
 {
-    return cfs_alloc(nr_bytes, 0);
+       return kmalloc(nr_bytes, 0);
 }
 
 /*
- * cfs_free_large
+ * vfree
  *   To free the sepcified memory to system pool
  *
  * Arguments:
@@ -230,15 +227,14 @@ cfs_alloc_large(size_t nr_bytes)
  *   N/A
  */
 
-void
-cfs_free_large(void *addr)
+void vfree(void *addr)
 {
-    cfs_free(addr);
+       kfree(addr);
 }
 
 
 /*
- * cfs_mem_cache_create
+ * kmem_cache_create
  *   To create a SLAB cache
  *
  * Arguments:
@@ -258,32 +254,26 @@ cfs_free_large(void *addr)
  *   3, parameters C/D are removed.
  */
 
-cfs_mem_cache_t *
-cfs_mem_cache_create(
-    const char * name,
-    size_t size,
-    size_t offset,
-    unsigned long flags
-    )
+struct kmem_cache *kmem_cache_create(const char *name, size_t size,
+                                    size_t offset, unsigned long flags,
+                                    void *ctor)
 {
-    cfs_mem_cache_t * kmc = NULL;
+       struct kmem_cache *kmc = NULL;
 
-    /*  The name of the SLAB could not exceed 20 chars */
+       /* The name of the SLAB cache must not exceed 20 chars */
 
-    if (name && strlen(name) >= 20) {
-        goto errorout;
-    }
+       if (name && strlen(name) >= 20)
+               goto errorout;
 
-    /* Allocate and initialize the SLAB strcture */
+       /* Allocate and initialize the SLAB structure */
 
-    kmc = cfs_alloc (sizeof(cfs_mem_cache_t), 0);
+       kmc = kmalloc(sizeof(struct kmem_cache), 0);
 
-    if (NULL == kmc) {
-        goto errorout;
-    }
+       if (NULL == kmc)
+               goto errorout;
 
-    memset(kmc, 0, sizeof(cfs_mem_cache_t));
-    kmc->flags = flags;
+       memset(kmc, 0, sizeof(struct kmem_cache));
+       kmc->flags = flags;
 
     if (name) {
         strcpy(&kmc->name[0], name);
@@ -306,7 +296,7 @@ errorout:
 }
 
 /*
- * cfs_mem_cache_destroy
+ * kmem_cache_destroy
  *   To destroy the unused SLAB cache
  *
  * Arguments:
@@ -320,19 +310,19 @@ errorout:
  *   N/A
  */
 
-int cfs_mem_cache_destroy (cfs_mem_cache_t * kmc)
+void kmem_cache_destroy(struct kmem_cache *kmc)
 {
-    ASSERT(kmc != NULL);
+       ASSERT(kmc != NULL);
 
-    ExDeleteNPagedLookasideList(&(kmc->npll));
+       ExDeleteNPagedLookasideList(&(kmc->npll));
 
-    cfs_free(kmc);
+       kfree(kmc);
 
-    return 0;
+       return;
 }
 
 /*
- * cfs_mem_cache_alloc
+ * kmem_cache_alloc
  *   To allocate an object (LookAside entry) from the SLAB
  *
  * Arguments:
@@ -347,17 +337,17 @@ int cfs_mem_cache_destroy (cfs_mem_cache_t * kmc)
  *   N/A
  */
 
-void *cfs_mem_cache_alloc(cfs_mem_cache_t * kmc, int flags)
+void *kmem_cache_alloc(struct kmem_cache *kmc, int flags)
 {
-    void *buf = NULL;
+       void *buf = NULL;
 
-    buf = ExAllocateFromNPagedLookasideList(&(kmc->npll));
+       buf = ExAllocateFromNPagedLookasideList(&(kmc->npll));
 
-    return buf;
+       return buf;
 }
 
 /*
- * cfs_mem_cache_free
+ * kmem_cache_free
  *   To free an object (LookAside entry) to the SLAB cache
  *
  * Arguments:
@@ -371,7 +361,7 @@ void *cfs_mem_cache_alloc(cfs_mem_cache_t * kmc, int flags)
  *   N/A
  */
 
-void cfs_mem_cache_free(cfs_mem_cache_t * kmc, void * buf)
+void kmem_cache_free(struct kmem_cache *kmc, void *buf)
 {
     ExFreeToNPagedLookasideList(&(kmc->npll), buf);
 }
@@ -380,10 +370,10 @@ spinlock_t  shrinker_guard = {0};
 CFS_LIST_HEAD(shrinker_hdr);
 cfs_timer_t shrinker_timer = {0};
 
-struct cfs_shrinker * cfs_set_shrinker(int seeks, shrink_callback cb)
+struct shrinker *set_shrinker(int seeks, shrink_callback cb)
 {
-    struct cfs_shrinker * s = (struct cfs_shrinker *)
-        cfs_alloc(sizeof(struct cfs_shrinker), CFS_ALLOC_ZERO);
+       struct shrinker *s = (struct shrinker *)
+               kmalloc(sizeof(struct shrinker), __GFP_ZERO);
        if (s) {
                s->cb = cb;
                s->seeks = seeks;
@@ -396,33 +386,33 @@ struct cfs_shrinker * cfs_set_shrinker(int seeks, shrink_callback cb)
        return s;
 }
 
-void cfs_remove_shrinker(struct cfs_shrinker *s)
+void remove_shrinker(struct shrinker *s)
 {
-       struct cfs_shrinker *tmp;
+       struct shrinker *tmp;
        spin_lock(&shrinker_guard);
 #if TRUE
-    cfs_list_for_each_entry_typed(tmp, &shrinker_hdr,
-                                  struct cfs_shrinker, list) {
-        if (tmp == s) {
-            cfs_list_del(&tmp->list);
-            break;
-        } 
-    }
+       cfs_list_for_each_entry_typed(tmp, &shrinker_hdr,
+                                     struct shrinker, list) {
+               if (tmp == s) {
+                       cfs_list_del(&tmp->list);
+                       break;
+               }
+       }
 #else
-    cfs_list_del(&s->list);
+       cfs_list_del(&s->list);
 #endif
        spin_unlock(&shrinker_guard);
-       cfs_free(s);
+       kfree(s);
 }
 
 /* time ut test proc */
 void shrinker_timer_proc(ulong_ptr_t arg)
 {
-       struct cfs_shrinker *s;
+       struct shrinker *s;
        spin_lock(&shrinker_guard);
 
        cfs_list_for_each_entry_typed(s, &shrinker_hdr,
-                                     struct cfs_shrinker, list) {
+                                     struct shrinker, list) {
                s->cb(s->nr, __GFP_FS);
        }
        spin_unlock(&shrinker_guard);
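
For the renamed WinNT shrinker API above, registration would look roughly as follows. The callback signature is inferred from the s->cb(s->nr, __GFP_FS) call in shrinker_timer_proc and from the set_shrinker()/remove_shrinker() definitions in this hunk; DEFAULT_SEEKS and the exact shrink_callback typedef are assumptions.

static int my_shrink(int nr_to_scan, unsigned int gfp_mask)
{
        /* would scan and release up to nr_to_scan cached objects */
        return 0;
}

static struct shrinker *my_shrinker;

static int my_cache_setup(void)
{
        my_shrinker = set_shrinker(DEFAULT_SEEKS, my_shrink);
        return my_shrinker == NULL ? -ENOMEM : 0;
}

static void my_cache_teardown(void)
{
        if (my_shrinker != NULL)
                remove_shrinker(my_shrinker);
}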
index 3a3b3f9..c11fff2 100644 (file)
@@ -49,7 +49,7 @@ int libcfs_ioctl_getdata(char *buf, char *end, void *arg)
         hdr = (struct libcfs_ioctl_hdr *)buf;
         data = (struct libcfs_ioctl_data *)buf;
 
-        err = cfs_copy_from_user(buf, (void *)arg, sizeof(*hdr));
+       err = copy_from_user(buf, (void *)arg, sizeof(*hdr));
         if (err)
                 RETURN(err);
 
@@ -68,7 +68,7 @@ int libcfs_ioctl_getdata(char *buf, char *end, void *arg)
                 RETURN(-EINVAL);
         }
 
-        err = cfs_copy_from_user(buf, (void *)arg, hdr->ioc_len);
+       err = copy_from_user(buf, (void *)arg, hdr->ioc_len);
         if (err)
                 RETURN(err);
 
@@ -89,7 +89,7 @@ int libcfs_ioctl_getdata(char *buf, char *end, void *arg)
 
 int libcfs_ioctl_popdata(void *arg, void *data, int size)
 {
-       if (cfs_copy_to_user((char *)arg, data, size))
+       if (copy_to_user((char *)arg, data, size))
                return -EFAULT;
        return 0;
 }
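
Both helpers above rely on the kernel convention that copy_from_user()/copy_to_user() return the number of bytes left uncopied, so any nonzero result is treated as an error. A minimal sketch of the same pattern (hypothetical helper names):

static int fetch_ioctl_hdr(void *uaddr, struct libcfs_ioctl_hdr *hdr)
{
        /* nonzero return == bytes not copied == fault */
        if (copy_from_user(hdr, uaddr, sizeof(*hdr)))
                return -EFAULT;
        return 0;
}

static int push_ioctl_result(void *uaddr, void *data, int size)
{
        if (copy_to_user(uaddr, data, size))
                return -EFAULT;
        return 0;
}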
index 1d04567..6bedace 100644 (file)
@@ -59,9 +59,7 @@
  */
 
 void
-cfs_thread_proc(
-    void * context
-    )
+cfs_thread_proc(void *context)
 {
     cfs_thread_context_t * thread_context =
         (cfs_thread_context_t *) context;
@@ -74,7 +72,7 @@ cfs_thread_proc(
 
     /* Free the context memory */
 
-    cfs_free(context);
+    kfree(context);
 
     /* Terminate this system thread */
 
@@ -101,11 +99,11 @@ cfs_task_t kthread_run(int (*func)(void *), void *arg, char *name)
 {
     cfs_handle_t  thread = NULL;
     NTSTATUS      status;
-    cfs_thread_context_t * context = NULL;
+    cfs_thread_context_t *context = NULL;
 
     /* Allocate the context to be transferred to system thread */
 
-    context = cfs_alloc(sizeof(cfs_thread_context_t), CFS_ALLOC_ZERO);
+    context = kmalloc(sizeof(cfs_thread_context_t), __GFP_ZERO);
 
     if (!context) {
        return ERR_PTR(-ENOMEM);
@@ -126,7 +124,7 @@ cfs_task_t kthread_run(int (*func)(void *), void *arg, char *name)
     if (!NT_SUCCESS(status)) {
 
 
-        cfs_free(context);
+       kfree(context);
 
         /* We need translate the nt status to linux error code */
 
@@ -248,10 +246,10 @@ cfs_symbol_register(const char *name, const void *value)
     struct cfs_symbol       *sym = NULL;
     struct cfs_symbol       *new = NULL;
 
-    new = cfs_alloc(sizeof(struct cfs_symbol), CFS_ALLOC_ZERO);
-    if (!new) {
-        return (-ENOMEM);
-    }
+       new = kmalloc(sizeof(struct cfs_symbol), __GFP_ZERO);
+       if (!new)
+               return -ENOMEM;
+
     strncpy(new->name, name, CFS_SYMBOL_LEN);
     new->value = (void *)value;
     new->ref = 0;
@@ -262,7 +260,7 @@ cfs_symbol_register(const char *name, const void *value)
                sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
                if (!strcmp(sym->name, name)) {
                        up_write(&cfs_symbol_lock);
-                       cfs_free(new);
+                       kfree(new);
                        return 0; /* alreay registerred */
                }
        }
@@ -299,7 +297,7 @@ cfs_symbol_unregister(const char *name)
         if (!strcmp(sym->name, name)) {
             LASSERT(sym->ref == 0);
             cfs_list_del (&sym->sym_list);
-            cfs_free(sym);
+           kfree(sym);
             break;
         }
     }
@@ -331,7 +329,7 @@ cfs_symbol_clean()
                sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
                LASSERT(sym->ref == 0);
                cfs_list_del (&sym->sym_list);
-               cfs_free(sym);
+               kfree(sym);
        }
        up_write(&cfs_symbol_lock);
        return;
@@ -767,12 +765,12 @@ libcfs_arch_init(void)
        and kernel ntoskrnl.lib) */
     cfs_libc_init();
 
-    /* create slab memory caches for page alloctors */
-    cfs_page_t_slab = cfs_mem_cache_create(
-        "CPGT", sizeof(cfs_page_t), 0, 0 );
+       /* create slab memory caches for page allocators */
+       cfs_page_t_slab = kmem_cache_create("CPGT", sizeof(struct page),
+                                           0, 0, NULL);
 
-    cfs_page_p_slab = cfs_mem_cache_create(
-        "CPGP", CFS_PAGE_SIZE, 0, 0 );
+       cfs_page_p_slab = kmem_cache_create("CPGP", PAGE_CACHE_SIZE,
+                                           0, 0, NULL);
 
     if ( cfs_page_t_slab == NULL ||
          cfs_page_p_slab == NULL ){
@@ -810,15 +808,13 @@ libcfs_arch_init(void)
 
 errorout:
 
-    if (rc != 0) {
-        /* destroy the taskslot cache slab */
-        if (cfs_page_t_slab) {
-            cfs_mem_cache_destroy(cfs_page_t_slab);
-        }
-        if (cfs_page_p_slab) {
-            cfs_mem_cache_destroy(cfs_page_p_slab);
-        }
-    }
+       if (rc != 0) {
+               /* destroy the page cache slabs */
+               if (cfs_page_t_slab)
+                       kmem_cache_destroy(cfs_page_t_slab);
+               if (cfs_page_p_slab)
+                       kmem_cache_destroy(cfs_page_p_slab);
+       }
 
     return rc;
 }
@@ -840,11 +836,11 @@ libcfs_arch_cleanup(void)
 
     /* destroy the taskslot cache slab */
     if (cfs_page_t_slab) {
-        cfs_mem_cache_destroy(cfs_page_t_slab);
+       kmem_cache_destroy(cfs_page_t_slab);
     }
 
     if (cfs_page_p_slab) {
-        cfs_mem_cache_destroy(cfs_page_p_slab);
+       kmem_cache_destroy(cfs_page_p_slab);
     }
 
     return;
index 6a58609..770a452 100644 (file)
@@ -64,7 +64,7 @@ cfs_proc_entry_t *              cfs_proc_dev = NULL;
 
 
 /* SLAB object for cfs_proc_entry_t allocation */
-cfs_mem_cache_t *               proc_entry_cache = NULL;
+struct kmem_cache *proc_entry_cache;
 
 /* root node for sysctl table */
 cfs_sysctl_table_header_t       root_table_header;
@@ -98,9 +98,10 @@ proc_file_read(struct file * file, const char * buf, size_t nbytes, loff_t *ppos
     char    *start;
     cfs_proc_entry_t * dp;
 
-    dp = (cfs_proc_entry_t  *) file->f_inode->i_priv;
-    if (!(page = (char*) cfs_alloc(CFS_PAGE_SIZE, 0)))
-        return -ENOMEM;
+       dp = (cfs_proc_entry_t  *) file->f_inode->i_priv;
+       page = (char *) kmalloc(PAGE_CACHE_SIZE, 0);
+       if (page == NULL)
+               return -ENOMEM;
 
     while ((nbytes > 0) && !eof) {
 
@@ -132,7 +133,7 @@ proc_file_read(struct file * file, const char * buf, size_t nbytes, loff_t *ppos
             break;
         }
         
-        n -= cfs_copy_to_user((void *)buf, start, n);
+       n -= copy_to_user((void *)buf, start, n);
         if (n == 0) {
             if (retval == 0)
                 retval = -EFAULT;
@@ -144,9 +145,9 @@ proc_file_read(struct file * file, const char * buf, size_t nbytes, loff_t *ppos
         buf += n;
         retval += n;
     }
-    cfs_free(page);
+       kfree(page);
 
-    return retval;
+       return retval;
 }
 
 static ssize_t
@@ -181,10 +182,9 @@ proc_alloc_entry()
 {
     cfs_proc_entry_t * entry = NULL;
 
-    entry = cfs_mem_cache_alloc(proc_entry_cache, 0);
-    if (!entry) {
-        return NULL;
-    }
+       entry = kmem_cache_alloc(proc_entry_cache, 0);
+       if (!entry)
+               return NULL;
 
     memset(entry, 0, sizeof(cfs_proc_entry_t));
 
@@ -199,11 +199,9 @@ proc_alloc_entry()
 
 void
 proc_free_entry(cfs_proc_entry_t * entry)
-
 {
-    ASSERT(entry->magic == CFS_PROC_ENTRY_MAGIC);
-
-    cfs_mem_cache_free(proc_entry_cache, entry);
+       ASSERT(entry->magic == CFS_PROC_ENTRY_MAGIC);
+       kmem_cache_free(proc_entry_cache, entry);
 }
 
 /* dissect the path string for a given full proc path */
@@ -413,11 +411,10 @@ proc_search_entry(
     parent = root;
     entry = NULL;
 
-    ename = cfs_alloc(0x21, CFS_ALLOC_ZERO);
+       ename = kmalloc(0x21, __GFP_ZERO);
 
-    if (ename == NULL) {
-        goto errorout;
-    }
+       if (ename == NULL)
+               goto errorout;
 
 again:
 
@@ -452,7 +449,7 @@ again:
 errorout:
 
     if (ename) {
-        cfs_free(ename);
+       kfree(ename);
     }
 
     return entry;   
@@ -494,12 +491,10 @@ again:
             entry = proc_alloc_entry();
             memcpy(entry->name, ename, flen);
 
-            if (entry) {
-                if(!proc_insert_splay(parent, entry)) {
-                    proc_free_entry(entry);
-                    entry = NULL;
-                }
-            }
+               if (entry && !proc_insert_splay(parent, entry)) {
+                       proc_free_entry(entry);
+                       entry = NULL;
+               }
         }
 
         if (!entry) {
@@ -711,17 +706,15 @@ void proc_destory_subtree(cfs_proc_entry_t *entry)
 
 void proc_destroy_fs()
 {
-    LOCK_PROCFS();
+       LOCK_PROCFS();
 
-    if (cfs_proc_root) {
-        proc_destroy_splay(cfs_proc_root);
-    }
+       if (cfs_proc_root)
+               proc_destroy_splay(cfs_proc_root);
 
-    if (proc_entry_cache) {
-        cfs_mem_cache_destroy(proc_entry_cache);
-    }
+       if (proc_entry_cache)
+               kmem_cache_destroy(proc_entry_cache);
    
-    UNLOCK_PROCFS();
+       UNLOCK_PROCFS();
 }
 
 static char proc_item_path[512];
@@ -797,12 +790,8 @@ int proc_init_fs()
     CFS_INIT_LIST_HEAD(&(root_table_header.ctl_entry));
 
     INIT_PROCFS_LOCK();
-    proc_entry_cache = cfs_mem_cache_create(
-                            NULL,
-                            sizeof(cfs_proc_entry_t),
-                            0,
-                            0
-                            );
+    proc_entry_cache = kmem_cache_create(NULL, sizeof(cfs_proc_entry_t),
+                                        0, 0, NULL);
 
     if (!proc_entry_cache) {
         return (-ENOMEM);
@@ -999,26 +988,27 @@ int sysctl_string(cfs_sysctl_table_t *table, int *name, int nlen,
         return -ENOTDIR;
     
     if (oldval && oldlenp) {
-        if(get_user(len, oldlenp))
+       if (get_user(len, oldlenp))
             return -EFAULT;
-        if (len) {
-            l = strlen(table->data);
-            if (len > l) len = l;
-            if (len >= table->maxlen)
-                len = table->maxlen;
-            if(cfs_copy_to_user(oldval, table->data, len))
-                return -EFAULT;
-            if(put_user(0, ((char *) oldval) + len))
-                return -EFAULT;
-            if(put_user(len, oldlenp))
-                return -EFAULT;
-        }
+       if (len) {
+               l = strlen(table->data);
+               if (len > l)
+                       len = l;
+               if (len >= table->maxlen)
+                       len = table->maxlen;
+               if (copy_to_user(oldval, table->data, len))
+                       return -EFAULT;
+               if (put_user(0, ((char *) oldval) + len))
+                       return -EFAULT;
+               if (put_user(len, oldlenp))
+                       return -EFAULT;
+       }
     }
     if (newval && newlen) {
         len = newlen;
         if (len > table->maxlen)
             len = table->maxlen;
-        if(cfs_copy_from_user(table->data, newval, len))
+       if (copy_from_user(table->data, newval, len))
             return -EFAULT;
         if (len == table->maxlen)
             len--;
@@ -1088,12 +1078,12 @@ static int do_proc_dointvec(cfs_sysctl_table_t *table, int write, struct file *f
         if (write) {
             while (left) {
                 char c;
-                if(get_user(c,(char *) buffer))
-                    return -EFAULT;
+               if (get_user(c, (char *)buffer))
+                       return -EFAULT;
                 if (!isspace(c))
-                    break;
+                       break;
                 left--;
-                ((char *) buffer)++;
+               ((char *)buffer)++;
             }
             if (!left)
                 break;
@@ -1101,7 +1091,7 @@ static int do_proc_dointvec(cfs_sysctl_table_t *table, int write, struct file *f
             len = left;
             if (len > TMPBUFLEN-1)
                 len = TMPBUFLEN-1;
-            if(cfs_copy_from_user(buf, buffer, len))
+           if (copy_from_user(buf, buffer, len))
                 return -EFAULT;
             buf[len] = 0;
             p = buf;
@@ -1119,17 +1109,25 @@ static int do_proc_dointvec(cfs_sysctl_table_t *table, int write, struct file *f
                 val = -val;
             (char *)buffer += len;
             left -= len;
-            switch(op) {
-            case OP_SET:    *i = val; break;
-            case OP_AND:    *i &= val; break;
-            case OP_OR: *i |= val; break;
-            case OP_MAX:    if(*i < val)
-                        *i = val;
-                    break;
-            case OP_MIN:    if(*i > val)
-                        *i = val;
-                    break;
-            }
+               switch(op) {
+               case OP_SET:
+                       *i = val;
+                       break;
+               case OP_AND:
+                       *i &= val;
+                       break;
+               case OP_OR:
+                       *i |= val;
+                       break;
+               case OP_MAX:
+                       if (*i < val)
+                               *i = val;
+                       break;
+               case OP_MIN:
+                       if (*i > val)
+                               *i = val;
+                       break;
+               }
         } else {
             p = buf;
             if (!first)
@@ -1138,7 +1136,7 @@ static int do_proc_dointvec(cfs_sysctl_table_t *table, int write, struct file *f
             len = strlen(buf);
             if (len > left)
                 len = left;
-            if(cfs_copy_to_user(buffer, buf, len))
+           if (copy_to_user(buffer, buf, len))
                 return -EFAULT;
             left -= len;
             (char *)buffer += len;
@@ -1146,7 +1144,7 @@ static int do_proc_dointvec(cfs_sysctl_table_t *table, int write, struct file *f
     }
 
     if (!write && !first && left) {
-        if(put_user('\n', (char *) buffer))
+       if (put_user('\n', (char *) buffer))
             return -EFAULT;
         left--, ((char *)buffer)++;
     }
@@ -1154,7 +1152,7 @@ static int do_proc_dointvec(cfs_sysctl_table_t *table, int write, struct file *f
         p = (char *) buffer;
         while (left) {
             char c;
-            if(get_user(c, p++))
+           if (get_user(c, p++))
                 return -EFAULT;
             if (!isspace(c))
                 break;
@@ -1222,7 +1220,7 @@ int proc_dostring(cfs_sysctl_table_t *table, int write, struct file *filp,
         len = 0;
         p = buffer;
         while (len < *lenp) {
-            if(get_user(c, p++))
+           if (get_user(c, p++))
                 return -EFAULT;
             if (c == 0 || c == '\n')
                 break;
@@ -1230,7 +1228,7 @@ int proc_dostring(cfs_sysctl_table_t *table, int write, struct file *filp,
         }
         if (len >= (size_t)table->maxlen)
             len = (size_t)table->maxlen-1;
-        if(cfs_copy_from_user(table->data, buffer, len))
+       if (copy_from_user(table->data, buffer, len))
             return -EFAULT;
         ((char *) table->data)[len] = 0;
         filp->f_pos += *lenp;
@@ -1241,10 +1239,10 @@ int proc_dostring(cfs_sysctl_table_t *table, int write, struct file *filp,
         if (len > *lenp)
             len = *lenp;
         if (len)
-            if(cfs_copy_to_user(buffer, table->data, len))
+           if (copy_to_user(buffer, table->data, len))
                 return -EFAULT;
         if (len < *lenp) {
-            if(put_user('\n', ((char *) buffer) + len))
+           if (put_user('\n', ((char *) buffer) + len))
                 return -EFAULT;
             len++;
         }
@@ -1285,9 +1283,9 @@ int do_sysctl_strategy (cfs_sysctl_table_t *table,
             if (len) {
                 if (len > (size_t)table->maxlen)
                     len = (size_t)table->maxlen;
-                if(cfs_copy_to_user(oldval, table->data, len))
+               if (copy_to_user(oldval, table->data, len))
                     return -EFAULT;
-                if(put_user(len, oldlenp))
+               if (put_user(len, oldlenp))
                     return -EFAULT;
             }
         }
@@ -1295,7 +1293,7 @@ int do_sysctl_strategy (cfs_sysctl_table_t *table,
             len = newlen;
             if (len > (size_t)table->maxlen)
                 len = (size_t)table->maxlen;
-            if(cfs_copy_from_user(table->data, newval, len))
+           if (copy_from_user(table->data, newval, len))
                 return -EFAULT;
         }
     }
@@ -1366,7 +1364,7 @@ int do_sysctl(int *name, int nlen, void *oldval, size_t *oldlenp,
                     newval, newlen, head->ctl_table,
                     &context);
         if (context)
-            cfs_free(context);
+           kfree(context);
         if (error != -ENOTDIR)
             return error;
         tmp = tmp->next;
@@ -1447,7 +1445,7 @@ struct ctl_table_header *register_sysctl_table(cfs_sysctl_table_t * table,
                            int insert_at_head)
 {
     struct ctl_table_header *tmp;
-    tmp = cfs_alloc(sizeof(struct ctl_table_header), 0);
+    tmp = kmalloc(sizeof(struct ctl_table_header), 0);
     if (!tmp)
         return NULL;
     tmp->ctl_table = table;
@@ -1476,7 +1474,7 @@ void unregister_sysctl_table(struct ctl_table_header * header)
 #ifdef CONFIG_PROC_FS
     unregister_proc_table(header->ctl_table, cfs_proc_sys);
 #endif
-    cfs_free(header);
+    kfree(header);
 }
 
 
@@ -1658,13 +1656,13 @@ lustre_open_file(char *filename)
        if (fp == NULL)
                return NULL;
 
-       fh = cfs_alloc(sizeof(*fh), CFS_ALLOC_ZERO);
+       fh = kmalloc(sizeof(*fh), __GFP_ZERO);
        if (fh == NULL)
                return NULL;
 
-    fh->f_inode = cfs_alloc(sizeof(struct inode), CFS_ALLOC_ZERO);
+    fh->f_inode = kmalloc(sizeof(struct inode), __GFP_ZERO);
     if (!fh->f_inode) {
-        cfs_free(fh);
+       kfree(fh);
         return NULL;
     }
 
@@ -1678,8 +1676,8 @@ lustre_open_file(char *filename)
     }
 
     if (0 != rc) {
-        cfs_free(fh->f_inode);
-        cfs_free(fh);
+       kfree(fh->f_inode);
+       kfree(fh);
         return NULL;
     }
 
@@ -1699,8 +1697,8 @@ lustre_close_file(struct file *fh)
         fp->nlink--;
     }
 
-    cfs_free(fh->f_inode);
-    cfs_free(fh);
+    kfree(fh->f_inode);
+    kfree(fh);
 
     return rc;
 }
@@ -1877,7 +1875,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
        /* if not empty - flush it first */
        if (m->count) {
                n = min(m->count, size);
-               err = cfs_copy_to_user(buf, m->buf + m->from, n);
+               err = copy_to_user(buf, m->buf + m->from, n);
                if (err)
                        goto Efault;
                m->count -= n;
@@ -1903,7 +1901,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
                if (m->count < m->size)
                        goto Fill;
                m->op->stop(m, p);
-               cfs_free(m->buf);
+               kfree(m->buf);
                m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
                if (!m->buf)
                        goto Enomem;
@@ -1932,7 +1930,7 @@ Fill:
        }
        m->op->stop(m, p);
        n = min(m->count, size);
-       err = cfs_copy_to_user(buf, m->buf, n);
+       err = copy_to_user(buf, m->buf, n);
        if (err)
                goto Efault;
        copied += n;
@@ -2007,8 +2005,8 @@ static int traverse(struct seq_file *m, loff_t offset)
 
 Eoverflow:
        m->op->stop(m, p);
-       cfs_free(m->buf);
-       m->buf = cfs_alloc(m->size <<= 1, CFS_ALLOC_KERNEL | CFS_ALLOC_ZERO);
+       kfree(m->buf);
+       m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | __GFP_ZERO);
        return !m->buf ? -ENOMEM : -EAGAIN;
 }
 
@@ -2067,8 +2065,8 @@ int seq_release(struct inode *inode, struct file *file)
        struct seq_file *m = (struct seq_file *)file->private_data;
     if (m) {
         if (m->buf)
-               cfs_free(m->buf);
-           cfs_free(m);
+               kfree(m->buf);
+           kfree(m);
     }
        return 0;
 }
@@ -2195,7 +2193,7 @@ int single_open(struct file *file, int (*show)(struct seq_file *, void *),
                if (!res)
                        ((struct seq_file *)file->private_data)->private = data;
                else
-                       cfs_free(op);
+                       kfree(op);
        }
        return res;
 }
@@ -2205,7 +2203,7 @@ int single_release(struct inode *inode, struct file *file)
 {
        const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
        int res = seq_release(inode, file);
-       cfs_free((void *)op);
+       kfree((void *)op);
        return res;
 }
 EXPORT_SYMBOL(single_release);
@@ -2214,7 +2212,7 @@ int seq_release_private(struct inode *inode, struct file *file)
 {
        struct seq_file *seq = file->private_data;
 
-       cfs_free(seq->private);
+       kfree(seq->private);
        seq->private = NULL;
        return seq_release(inode, file);
 }
@@ -2227,7 +2225,7 @@ void *__seq_open_private(struct file *f, const struct seq_operations *ops,
        void *private;
        struct seq_file *seq;
 
-       private = cfs_alloc(psize, CFS_ALLOC_KERNEL | CFS_ALLOC_ZERO);
+       private = kmalloc(psize, GFP_KERNEL | __GFP_ZERO);
        if (private == NULL)
                goto out;
 
@@ -2240,7 +2238,7 @@ void *__seq_open_private(struct file *f, const struct seq_operations *ops,
        return private;
 
 out_free:
-       cfs_free(private);
+       kfree(private);
 out:
        return NULL;
 }
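
A paired-usage sketch for the seq_file helpers converted above: __seq_open_private() hands back a zeroed private area (now allocated with GFP_KERNEL | __GFP_ZERO) and seq_release_private() kfree()s it on close. The ops table and file-op names below are hypothetical.

struct my_iter {
        int     mi_pos;
};

static const struct seq_operations my_seq_ops;   /* hypothetical ops table */

static int my_proc_open(struct inode *inode, struct file *file)
{
        struct my_iter *it;

        it = __seq_open_private(file, &my_seq_ops, sizeof(*it));
        return it == NULL ? -ENOMEM : 0;
}

static int my_proc_release(struct inode *inode, struct file *file)
{
        /* frees seq->private, then the seq_file itself */
        return seq_release_private(inode, file);
}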
index 50d784f..04f9b66 100644 (file)
@@ -360,7 +360,7 @@ KsAllocateKsTsdu()
 
     } else {
 
-        KsTsdu = (PKS_TSDU) cfs_mem_cache_alloc(
+       KsTsdu = (PKS_TSDU) kmem_cache_alloc(
                         ks_data.ksnd_tsdu_slab, 0);
     }
 
@@ -393,7 +393,7 @@ KsFreeKsTsdu(
     PKS_TSDU  KsTsdu
     )
 {
-    cfs_mem_cache_free(
+    kmem_cache_free(
             ks_data.ksnd_tsdu_slab,
             KsTsdu );
 }
@@ -3035,7 +3035,7 @@ KsCleanupIpAddresses()
 
         list = RemoveHeadList(&ks_data.ksnd_addrs_list);
         slot = CONTAINING_RECORD(list, ks_addr_slot_t, link);
-        cfs_free(slot);
+       kfree(slot);
         ks_data.ksnd_naddrs--;
     }
 
@@ -3081,7 +3081,7 @@ KsAddAddressHandler(
                 return;
             }
 
-            slot = cfs_alloc(sizeof(ks_addr_slot_t) + DeviceName->Length, CFS_ALLOC_ZERO);
+           slot = kmalloc(sizeof(ks_addr_slot_t) + DeviceName->Length, __GFP_ZERO);
             if (slot != NULL) {
                spin_lock(&ks_data.ksnd_addrs_lock);
                 InsertTailList(&ks_data.ksnd_addrs_list, &slot->link);
@@ -3574,7 +3574,7 @@ KsTcpReceiveCompletionRoutine(
         /* free the Context structure... */
         ASSERT(Context->Magic == KS_TCP_CONTEXT_MAGIC);
         Context->Magic = 'CDAB';
-        cfs_free(Context);
+       kfree(Context);
     }
 
     /* free the Irp */
@@ -3745,7 +3745,7 @@ KsTcpSendCompletionRoutine(
     if (context) {
         ASSERT(context->Magic == KS_TCP_CONTEXT_MAGIC);
         context->Magic = 'CDAB';
-        cfs_free(context);
+       kfree(context);
     }
 
     /* free the Irp structure */
@@ -3854,7 +3854,7 @@ KsTcpReceiveEventHandler(
 
         /* there's still data in tdi internal queue, we need issue a new
            Irp to receive all of them. first allocate the tcp context */
-        context = cfs_alloc(sizeof(KS_TCP_COMPLETION_CONTEXT), 0);
+       context = kmalloc(sizeof(KS_TCP_COMPLETION_CONTEXT), 0);
         if (!context) {
             status = STATUS_INSUFFICIENT_RESOURCES;
             goto errorout;
@@ -3939,7 +3939,7 @@ errorout:
     if (context) {
         ASSERT(context->Magic == KS_TCP_CONTEXT_MAGIC);
         context->Magic = 'CDAB';
-        cfs_free(context);
+       kfree(context);
     }
 
     ks_abort_tconn(tconn);
@@ -4305,8 +4305,8 @@ ks_create_tconn()
     ks_tconn_t * tconn = NULL;
 
     /* allocate ksoc_tconn_t from the slab cache memory */
-    tconn = (ks_tconn_t *)cfs_mem_cache_alloc(
-                ks_data.ksnd_tconn_slab, CFS_ALLOC_ZERO);
+    tconn = (ks_tconn_t *)kmem_cache_alloc(
+               ks_data.ksnd_tconn_slab, __GFP_ZERO);
 
     if (tconn) {
 
@@ -4384,7 +4384,7 @@ ks_free_tconn(ks_tconn_t * tconn)
        spin_unlock(&(ks_data.ksnd_tconn_lock));
 
     /* free the structure memory */
-    cfs_mem_cache_free(ks_data.ksnd_tconn_slab, tconn);
+    kmem_cache_free(ks_data.ksnd_tconn_slab, tconn);
 
     KsPrint((3, "ks_free_tconn: tconn %p is freed.\n", tconn));
 }
@@ -5645,7 +5645,7 @@ KsBuildSend(ks_tconn_t * tconn, PKS_TSDUMGR TsduMgr,
     length = KsQueryMdlsSize(mdl);
 
     /* we need allocate the ks_tx_t structure from memory pool. */
-    context = cfs_alloc(sizeof(ks_tdi_tx_t), 0);
+    context = kmalloc(sizeof(ks_tdi_tx_t), 0);
     if (!context) {
         status = STATUS_INSUFFICIENT_RESOURCES;
         goto errorout;
@@ -5696,7 +5696,7 @@ errorout:
     if (context) {
         ASSERT(context->Magic == KS_TCP_CONTEXT_MAGIC);
         context->Magic = 'CDAB';
-        cfs_free(context);
+       kfree(context);
     }
 
     /* here need free the Irp. */
@@ -5865,8 +5865,8 @@ ks_init_tdi_data()
     CFS_INIT_LIST_HEAD(&ks_data.ksnd_tconns);
     cfs_init_event(&ks_data.ksnd_tconn_exit, TRUE, FALSE);
 
-    ks_data.ksnd_tconn_slab = cfs_mem_cache_create(
-        "tcon", sizeof(ks_tconn_t) , 0, 0);
+    ks_data.ksnd_tconn_slab = kmem_cache_create("tcon", sizeof(ks_tconn_t),
+                                               0, 0, NULL);
 
     if (!ks_data.ksnd_tconn_slab) {
         rc = -ENOMEM;
@@ -5877,8 +5877,8 @@ ks_init_tdi_data()
        spin_lock_init(&ks_data.ksnd_tsdu_lock);
     CFS_INIT_LIST_HEAD(&ks_data.ksnd_freetsdus);
     ks_data.ksnd_tsdu_size = TDINAL_TSDU_DEFAULT_SIZE; /* 64k */
-    ks_data.ksnd_tsdu_slab = cfs_mem_cache_create(
-        "tsdu", ks_data.ksnd_tsdu_size, 0, 0);
+    ks_data.ksnd_tsdu_slab = kmem_cache_create("tsdu", ks_data.ksnd_tsdu_size,
+                                              0, 0, NULL);
 
     if (!ks_data.ksnd_tsdu_slab) {
         rc = -ENOMEM;
@@ -5890,8 +5890,8 @@ ks_init_tdi_data()
     if (ks_data.ksnd_engine_nums < 4) {
         ks_data.ksnd_engine_nums = 4;
     }
-    ks_data.ksnd_engine_mgr = cfs_alloc(sizeof(ks_engine_mgr_t) * 
-                         ks_data.ksnd_engine_nums,CFS_ALLOC_ZERO);
+    ks_data.ksnd_engine_mgr = kmalloc(sizeof(ks_engine_mgr_t) *
+                        ks_data.ksnd_engine_nums, __GFP_ZERO);
     if (ks_data.ksnd_engine_mgr == NULL) {
         rc = -ENOMEM;
         goto errorout;
@@ -5912,7 +5912,7 @@ errorout:
     /* do cleanup in case we get failures */
     if (rc < 0) {
         if (ks_data.ksnd_tconn_slab) {
-            cfs_mem_cache_destroy(ks_data.ksnd_tconn_slab);
+            kmem_cache_destroy(ks_data.ksnd_tconn_slab);
             ks_data.ksnd_tconn_slab = NULL;
         }
     }
@@ -5967,7 +5967,7 @@ ks_fini_tdi_data()
     cfs_wait_event_internal(&ks_data.ksnd_tconn_exit, 0);
 
     /* it's safe to delete the tconn slab ... */
-    cfs_mem_cache_destroy(ks_data.ksnd_tconn_slab);
+    kmem_cache_destroy(ks_data.ksnd_tconn_slab);
     ks_data.ksnd_tconn_slab = NULL;
 
     /* clean up all the tsud buffers in the free list */
@@ -5975,14 +5975,14 @@ ks_fini_tdi_data()
     cfs_list_for_each (list, &ks_data.ksnd_freetsdus) {
         KsTsdu = cfs_list_entry (list, KS_TSDU, Link);
 
-        cfs_mem_cache_free(
+       kmem_cache_free(
                 ks_data.ksnd_tsdu_slab,
                 KsTsdu );
     }
        spin_unlock(&(ks_data.ksnd_tsdu_lock));
 
     /* it's safe to delete the tsdu slab ... */
-    cfs_mem_cache_destroy(ks_data.ksnd_tsdu_slab);
+    kmem_cache_destroy(ks_data.ksnd_tsdu_slab);
     ks_data.ksnd_tsdu_slab = NULL;
 
     /* good! it's smooth to do the cleaning up...*/
@@ -6554,7 +6554,7 @@ int libcfs_ipif_enumerate(char ***names)
 
        spin_lock(&ks_data.ksnd_addrs_lock);
 
-    *names = cfs_alloc(sizeof(char *) * ks_data.ksnd_naddrs, CFS_ALLOC_ZERO);
+    *names = kmalloc(sizeof(char *) * ks_data.ksnd_naddrs, __GFP_ZERO);
     if (*names == NULL) {
         goto errorout;
     }
@@ -6578,7 +6578,7 @@ errorout:
 void libcfs_ipif_free_enumeration(char **names, int n)
 {
     if (names) {
-        cfs_free(names);
+       kfree(names);
     }
 }
 
index cbfe862..9e34cd3 100644 (file)
@@ -62,8 +62,8 @@ int cfs_tracefile_init_arch()
        memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
        for (i = 0; i < CFS_TCD_TYPE_MAX; i++) {
                cfs_trace_data[i] =
-                        cfs_alloc(sizeof(union cfs_trace_data_union) * \
-                                 CFS_NR_CPUS, CFS_ALLOC_KERNEL);
+                       kmalloc(sizeof(union cfs_trace_data_union) * \
+                                 CFS_NR_CPUS, GFP_KERNEL);
                if (cfs_trace_data[i] == NULL)
                        goto out;
        }
@@ -78,8 +78,8 @@ int cfs_tracefile_init_arch()
        for (i = 0; i < cfs_num_possible_cpus(); i++)
                for (j = 0; j < CFS_TCD_TYPE_MAX; j++) {
                        cfs_trace_console_buffers[i][j] =
-                               cfs_alloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
-                                         CFS_ALLOC_KERNEL);
+                               kmalloc(CFS_TRACE_CONSOLE_BUFFER_SIZE,
+                                         GFP_KERNEL);
 
                        if (cfs_trace_console_buffers[i][j] == NULL)
                                goto out;
@@ -102,14 +102,14 @@ void cfs_tracefile_fini_arch()
        for (i = 0; i < cfs_num_possible_cpus(); i++) {
                for (j = 0; j < CFS_TCD_TYPE_MAX; j++) {
                        if (cfs_trace_console_buffers[i][j] != NULL) {
-                               cfs_free(cfs_trace_console_buffers[i][j]);
+                               kfree(cfs_trace_console_buffers[i][j]);
                                cfs_trace_console_buffers[i][j] = NULL;
                        }
                }
        }
 
        for (i = 0; cfs_trace_data[i] != NULL; i++) {
-               cfs_free(cfs_trace_data[i]);
+               kfree(cfs_trace_data[i]);
                cfs_trace_data[i] = NULL;
        }
 
@@ -217,7 +217,7 @@ void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
 
 int cfs_trace_max_debug_mb(void)
 {
-       int  total_mb = (cfs_num_physpages >> (20 - CFS_PAGE_SHIFT));
+       int  total_mb = (num_physpages >> (20 - PAGE_CACHE_SHIFT));
        
        return MAX(512, (total_mb * 80)/100);
 }
index a0f630c..c5f2312 100644 (file)
@@ -1020,7 +1020,7 @@ void* pgalloc(size_t factor)
 {
     LPVOID page;
 
-    page = VirtualAlloc(NULL, CFS_PAGE_SIZE << factor,
+    page = VirtualAlloc(NULL, PAGE_CACHE_SIZE << factor,
                         MEM_COMMIT, PAGE_READWRITE);
     return page;
 }
index ece517c..5f84d6c 100644 (file)
@@ -114,7 +114,7 @@ static int idr_pre_get(struct idr_context *idp)
        while (idp->id_free_cnt < IDR_FREE_MAX) {
                struct idr_layer *new;
 
-        new = cfs_alloc(sizeof(struct idr_layer), CFS_ALLOC_ZERO);
+               new = kmalloc(sizeof(struct idr_layer), __GFP_ZERO);
                if(new == NULL)
                        return (0);
                free_layer(idp, new);
@@ -326,7 +326,7 @@ static int _idr_remove(struct idr_context *idp, int id)
        }
        while (idp->id_free_cnt >= IDR_FREE_MAX) {
                p = alloc_layer(idp);
-               cfs_free(p);
+               kfree(p);
        }
        return 0;
 }
@@ -341,7 +341,7 @@ static int _idr_remove(struct idr_context *idp, int id)
 struct idr_context *cfs_idr_init()
 {
     struct idr_context * idp = NULL;
-    idp = cfs_alloc(sizeof(struct idr_context), 0);
+    idp = kmalloc(sizeof(struct idr_context), 0);
     if (idp) {
         memset(idp, 0, sizeof(struct idr_context));
     }
@@ -404,7 +404,7 @@ void *cfs_idr_find(struct idr_context *idp, int id)
 void cfs_idr_exit(struct idr_context *idp)
 {
     if (idp) {
-           cfs_free(idp);
+           kfree(idp);
     }
 }
 
index 562ca06..5dd29a3 100644 (file)
@@ -46,7 +46,7 @@
 /*
  * XXX Liang:
  *
- * Temporary fix, because lnet_me_free()->cfs_free->FREE() can be blocked in xnu,
+ * Temporary fix, because lnet_me_free()->kfree->FREE() can be blocked in xnu,
  * at then same time we've taken LNET_LOCK(), which is a spinlock.
  * by using LNET_USE_LIB_FREELIST, we can avoid calling of FREE().
  *
index eb82bb0..d6cd5ea 100644 (file)
@@ -326,16 +326,16 @@ typedef struct iovec lnet_md_iovec_t;
  * A page-based fragment of a MD.
  */
 typedef struct {
-        /** Pointer to the page where the fragment resides */
-        cfs_page_t      *kiov_page;
-        /** Length in bytes of the fragment */
-        unsigned int     kiov_len;
-        /**
-         * Starting offset of the fragment within the page. Note that the
-         * end of the fragment must not pass the end of the page; i.e.,
-         * kiov_len + kiov_offset <= CFS_PAGE_SIZE.
-         */
-        unsigned int     kiov_offset;
+       /** Pointer to the page where the fragment resides */
+       struct page      *kiov_page;
+       /** Length in bytes of the fragment */
+       unsigned int     kiov_len;
+       /**
+        * Starting offset of the fragment within the page. Note that the
+        * end of the fragment must not pass the end of the page; i.e.,
+        * kiov_len + kiov_offset <= PAGE_CACHE_SIZE.
+        */
+       unsigned int     kiov_offset;
 } lnet_kiov_t;
 /** @} lnet_md */
 
index 86298dd..adf9a44 100644 (file)
@@ -1188,7 +1188,7 @@ kiblnd_free_pages(kib_pages_t *p)
 
        for (i = 0; i < npages; i++) {
                if (p->ibp_pages[i] != NULL)
-                       cfs_free_page(p->ibp_pages[i]);
+                       __free_page(p->ibp_pages[i]);
        }
 
        LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages]));
@@ -1212,7 +1212,7 @@ kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
 
         for (i = 0; i < npages; i++) {
                p->ibp_pages[i] = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
-                                                    CFS_ALLOC_IO);
+                                                    __GFP_IO);
                 if (p->ibp_pages[i] == NULL) {
                         CERROR("Can't allocate page %d of %d\n", i, npages);
                         kiblnd_free_pages(p);
index 2edf87d..62c3f56 100644 (file)
@@ -618,7 +618,7 @@ kptllnd_base_shutdown (void)
         LASSERT (cfs_list_empty(&kptllnd_data.kptl_idle_txs));
 
         if (kptllnd_data.kptl_rx_cache != NULL)
-                cfs_mem_cache_destroy(kptllnd_data.kptl_rx_cache);
+               kmem_cache_destroy(kptllnd_data.kptl_rx_cache);
 
         if (kptllnd_data.kptl_peers != NULL)
                 LIBCFS_FREE(kptllnd_data.kptl_peers,
@@ -797,7 +797,7 @@ kptllnd_base_startup (void)
         kptllnd_rx_buffer_pool_init(&kptllnd_data.kptl_rx_buffer_pool);
 
         kptllnd_data.kptl_rx_cache =
-                cfs_mem_cache_create("ptllnd_rx",
+               kmem_cache_create("ptllnd_rx",
                                      sizeof(kptl_rx_t) + 
                                      *kptllnd_tunables.kptl_max_msg_size,
                                      0,    /* offset */
index 6f511d5..e6fd99b 100644 (file)
@@ -270,7 +270,7 @@ struct kptl_data
         cfs_waitq_t             kptl_watchdog_waitq;   /* watchdog sleeps here */
 
         kptl_rx_buffer_pool_t   kptl_rx_buffer_pool;   /* rx buffer pool */
-        cfs_mem_cache_t*        kptl_rx_cache;         /* rx descripter cache */
+       struct kmem_cache       *kptl_rx_cache;         /* rx descriptor cache */
 
         cfs_atomic_t            kptl_ntx;              /* # tx descs allocated */
        spinlock_t              kptl_tx_lock;        /* serialise idle tx list*/
index 39ef81f..094326c 100644 (file)
@@ -342,7 +342,7 @@ kptllnd_rx_alloc(void)
                 return NULL;
         }
 
-        rx = cfs_mem_cache_alloc(kptllnd_data.kptl_rx_cache, CFS_ALLOC_ATOMIC);
+       rx = kmem_cache_alloc(kptllnd_data.kptl_rx_cache, GFP_ATOMIC);
         if (rx == NULL) {
                 CERROR("Failed to allocate rx\n");
                 return NULL;
@@ -390,7 +390,7 @@ kptllnd_rx_done(kptl_rx_t *rx, int post_credit)
                 kptllnd_peer_decref(peer);
         }
 
-        cfs_mem_cache_free(kptllnd_data.kptl_rx_cache, rx);
+       kmem_cache_free(kptllnd_data.kptl_rx_cache, rx);
 }
 
 void
index 9148413..ff50818 100644 (file)
@@ -264,7 +264,7 @@ ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
         int           i;
         
         for (nob = i = 0; i < niov; i++) {
-                scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) +
+               scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
                                          kiov[i].kiov_offset;
                 nob += scratchiov[i].iov_len = kiov[i].kiov_len;
         }
@@ -276,7 +276,7 @@ ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
          */
         rc = -sock_send(sock, &msg, MSG_DONTWAIT, &sndlen);
         for (i = 0; i < niov; i++)
-                cfs_kunmap(kiov[i].kiov_page);
+               kunmap(kiov[i].kiov_page);
         if (rc == 0)
                 rc = sndlen;
         return rc;
@@ -351,14 +351,14 @@ ksocknal_lib_recv_kiov (ksock_conn_t *conn)
         /* NB we can't trust socket ops to either consume our iovs
          * or leave them alone. */
         for (nob = i = 0; i < niov; i++) {
-                scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) + \
+               scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + \
                                          kiov[i].kiov_offset;
                 nob += scratchiov[i].iov_len = kiov[i].kiov_len;
         }
         LASSERT (nob <= conn->ksnc_rx_nob_wanted);
         rc = -sock_receive(C2B_SOCK(conn->ksnc_sock), &msg, MSG_DONTWAIT, &rcvlen); 
         for (i = 0; i < niov; i++)
-                cfs_kunmap(kiov[i].kiov_page); 
+               kunmap(kiov[i].kiov_page);
         if (rc == 0)
                 rc = rcvlen;
         return (rc);
@@ -609,7 +609,7 @@ ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
         CFS_DECL_NET_DATA; 
         
         for (nob = i = 0; i < niov; i++) { 
-                scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) + 
+               scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
                                          kiov[i].kiov_offset; 
                 nob += scratchiov[i].iov_len = kiov[i].kiov_len; 
         }
@@ -620,7 +620,7 @@ ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
         CFS_NET_EX;
 
         for (i = 0; i < niov; i++) 
-                cfs_kunmap(kiov[i].kiov_page);
+               kunmap(kiov[i].kiov_page);
 
         if (rc != 0) {
                 if (suio.uio_resid != nob &&\
@@ -800,7 +800,7 @@ ksocknal_lib_recv_kiov (ksock_conn_t *conn)
         CFS_DECL_NET_DATA;
 
         for (nob = i = 0; i < niov; i++) { 
-                scratchiov[i].iov_base = cfs_kmap(kiov[i].kiov_page) + kiov[i].kiov_offset; 
+               scratchiov[i].iov_base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
                 nob += scratchiov[i].iov_len = kiov[i].kiov_len; 
         } 
         LASSERT (nob <= conn->ksnc_rx_nob_wanted);
@@ -812,7 +812,7 @@ ksocknal_lib_recv_kiov (ksock_conn_t *conn)
         CFS_NET_EX;
 
         for (i = 0; i < niov; i++) 
-                cfs_kunmap(kiov[i].kiov_page);
+               kunmap(kiov[i].kiov_page);
 
         if (rc){
                 if (ruio.uio_resid != nob && \
index 805af17..1dff915 100644 (file)
@@ -688,7 +688,8 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
 
         for (nob = i = 0; i < niov; i++) {
                 if ((kiov[i].kiov_offset != 0 && i > 0) ||
-                    (kiov[i].kiov_offset + kiov[i].kiov_len != CFS_PAGE_SIZE && i < niov - 1))
+                   (kiov[i].kiov_offset + kiov[i].kiov_len !=
+                    PAGE_CACHE_SIZE && i < niov - 1))
                         return NULL;
 
                 pages[i] = kiov[i].kiov_page;
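
The condition above only admits fragment lists that are virtually contiguous: the first fragment may start mid-page and the last may end early, but everything in between must be whole pages. A standalone sketch of the same test (illustrative):

static int kiov_is_vmappable(lnet_kiov_t *kiov, int niov)
{
        int i;

        for (i = 0; i < niov; i++) {
                /* only the first fragment may have a nonzero offset */
                if (kiov[i].kiov_offset != 0 && i > 0)
                        return 0;
                /* only the last fragment may be a partial page */
                if (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE &&
                    i < niov - 1)
                        return 0;
        }
        return 1;
}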
index 6aa6f9e..1df7cca 100644 (file)
@@ -2107,12 +2107,8 @@ lnet_ping (lnet_process_id_t id, int timeout_ms, lnet_process_id_t *ids, int n_i
         for (i = 0; i < n_ids; i++) {
                 tmpid.pid = info->pi_pid;
                 tmpid.nid = info->pi_ni[i].ns_nid;
-#ifdef __KERNEL__
-                if (cfs_copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
+               if (copy_to_user(&ids[i], &tmpid, sizeof(tmpid)))
                         goto out_1;
-#else
-                ids[i] = tmpid;
-#endif
         }
         rc = info->pi_nnis;
 
index f730e24..8008c63 100644 (file)
@@ -137,7 +137,7 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
                 for (i = 0; i < (int)niov; i++) {
                         /* We take the page pointer on trust */
                         if (lmd->md_iov.kiov[i].kiov_offset +
-                            lmd->md_iov.kiov[i].kiov_len > CFS_PAGE_SIZE )
+                           lmd->md_iov.kiov[i].kiov_len > PAGE_CACHE_SIZE)
                                 return -EINVAL; /* invalid length */
 
                         total_length += lmd->md_iov.kiov[i].kiov_len;
index 5240aac..a531d94 100644 (file)
@@ -368,47 +368,47 @@ lnet_copy_kiov2kiov (unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset
                                siov->kiov_len - soffset);
                 this_nob = MIN(this_nob, nob);
 
-                if (daddr == NULL)
-                        daddr = ((char *)cfs_kmap(diov->kiov_page)) + 
-                                diov->kiov_offset + doffset;
-                if (saddr == NULL)
-                        saddr = ((char *)cfs_kmap(siov->kiov_page)) + 
-                                siov->kiov_offset + soffset;
-
-                /* Vanishing risk of kmap deadlock when mapping 2 pages.
-                 * However in practice at least one of the kiovs will be mapped
-                 * kernel pages and the map/unmap will be NOOPs */
-
-                memcpy (daddr, saddr, this_nob);
-                nob -= this_nob;
-
-                if (diov->kiov_len > doffset + this_nob) {
-                        daddr += this_nob;
-                        doffset += this_nob;
-                } else {
-                        cfs_kunmap(diov->kiov_page);
-                        daddr = NULL;
-                        diov++;
-                        ndiov--;
-                        doffset = 0;
-                }
+               if (daddr == NULL)
+                       daddr = ((char *)kmap(diov->kiov_page)) +
+                               diov->kiov_offset + doffset;
+               if (saddr == NULL)
+                       saddr = ((char *)kmap(siov->kiov_page)) +
+                               siov->kiov_offset + soffset;
+
+               /* Vanishing risk of kmap deadlock when mapping 2 pages.
+                * However in practice at least one of the kiovs will be mapped
+                * kernel pages and the map/unmap will be NOOPs */
+
+               memcpy (daddr, saddr, this_nob);
+               nob -= this_nob;
+
+               if (diov->kiov_len > doffset + this_nob) {
+                       daddr += this_nob;
+                       doffset += this_nob;
+               } else {
+                       kunmap(diov->kiov_page);
+                       daddr = NULL;
+                       diov++;
+                       ndiov--;
+                       doffset = 0;
+               }
 
-                if (siov->kiov_len > soffset + this_nob) {
-                        saddr += this_nob;
-                        soffset += this_nob;
-                } else {
-                        cfs_kunmap(siov->kiov_page);
-                        saddr = NULL;
-                        siov++;
-                        nsiov--;
-                        soffset = 0;
-                }
-        } while (nob > 0);
+               if (siov->kiov_len > soffset + this_nob) {
+                       saddr += this_nob;
+                       soffset += this_nob;
+               } else {
+                       kunmap(siov->kiov_page);
+                       saddr = NULL;
+                       siov++;
+                       nsiov--;
+                       soffset = 0;
+               }
+       } while (nob > 0);
 
-        if (daddr != NULL)
-                cfs_kunmap(diov->kiov_page);
-        if (saddr != NULL)
-                cfs_kunmap(siov->kiov_page);
+       if (daddr != NULL)
+               kunmap(diov->kiov_page);
+       if (saddr != NULL)
+               kunmap(siov->kiov_page);
 }
 EXPORT_SYMBOL(lnet_copy_kiov2kiov);
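
The rewritten copy loop above depends on the standard highmem pairing that replaces cfs_kmap()/cfs_kunmap(): kmap() returns a kernel virtual address for a page (effectively a no-op lookup for lowmem pages) and each successful kmap() must be balanced by a kunmap() on the same page. A minimal sketch of that pairing, with an illustrative helper name that is not part of the patch:

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy nob bytes from one page fragment to another, keeping both pages
 * mapped only for the duration of the memcpy(). */
static void copy_page_fragment(struct page *dpage, unsigned int doff,
                               struct page *spage, unsigned int soff,
                               size_t nob)
{
        char *daddr = (char *)kmap(dpage) + doff;
        char *saddr = (char *)kmap(spage) + soff;

        memcpy(daddr, saddr, nob);

        kunmap(spage);
        kunmap(dpage);
}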
 
@@ -450,7 +450,7 @@ lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset
                 this_nob = MIN(this_nob, nob);
 
                 if (addr == NULL)
-                        addr = ((char *)cfs_kmap(kiov->kiov_page)) + 
+                       addr = ((char *)kmap(kiov->kiov_page)) +
                                 kiov->kiov_offset + kiovoffset;
 
                 memcpy ((char *)iov->iov_base + iovoffset, addr, this_nob);
@@ -468,17 +468,17 @@ lnet_copy_kiov2iov (unsigned int niov, struct iovec *iov, unsigned int iovoffset
                         addr += this_nob;
                         kiovoffset += this_nob;
                 } else {
-                        cfs_kunmap(kiov->kiov_page);
-                        addr = NULL;
-                        kiov++;
-                        nkiov--;
-                        kiovoffset = 0;
-                }
+                       kunmap(kiov->kiov_page);
+                       addr = NULL;
+                       kiov++;
+                       nkiov--;
+                       kiovoffset = 0;
+               }
 
-        } while (nob > 0);
+       } while (nob > 0);
 
-        if (addr != NULL)
-                cfs_kunmap(kiov->kiov_page);
+       if (addr != NULL)
+               kunmap(kiov->kiov_page);
 }
 EXPORT_SYMBOL(lnet_copy_kiov2iov);
 
@@ -520,34 +520,34 @@ lnet_copy_iov2kiov (unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffs
                 this_nob = MIN(this_nob, nob);
 
                 if (addr == NULL)
-                        addr = ((char *)cfs_kmap(kiov->kiov_page)) + 
-                                kiov->kiov_offset + kiovoffset;
+                       addr = ((char *)kmap(kiov->kiov_page)) +
+                               kiov->kiov_offset + kiovoffset;
 
-                memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
-                nob -= this_nob;
+               memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
+               nob -= this_nob;
 
-                if (kiov->kiov_len > kiovoffset + this_nob) {
-                        addr += this_nob;
-                        kiovoffset += this_nob;
-                } else {
-                        cfs_kunmap(kiov->kiov_page);
-                        addr = NULL;
-                        kiov++;
-                        nkiov--;
-                        kiovoffset = 0;
-                }
+               if (kiov->kiov_len > kiovoffset + this_nob) {
+                       addr += this_nob;
+                       kiovoffset += this_nob;
+               } else {
+                       kunmap(kiov->kiov_page);
+                       addr = NULL;
+                       kiov++;
+                       nkiov--;
+                       kiovoffset = 0;
+               }
 
-                if (iov->iov_len > iovoffset + this_nob) {
-                        iovoffset += this_nob;
-                } else {
-                        iov++;
-                        niov--;
-                        iovoffset = 0;
-                }
-        } while (nob > 0);
+               if (iov->iov_len > iovoffset + this_nob) {
+                       iovoffset += this_nob;
+               } else {
+                       iov++;
+                       niov--;
+                       iovoffset = 0;
+               }
+       } while (nob > 0);
 
-        if (addr != NULL)
-                cfs_kunmap(kiov->kiov_page);
+       if (addr != NULL)
+               kunmap(kiov->kiov_page);
 }
 EXPORT_SYMBOL(lnet_copy_iov2kiov);
 
@@ -582,14 +582,14 @@ lnet_extract_kiov (int dst_niov, lnet_kiov_t *dst,
                 dst->kiov_page = src->kiov_page;
                 dst->kiov_offset = src->kiov_offset + offset;
 
-                if (len <= frag_len) {
-                        dst->kiov_len = len;
-                        LASSERT (dst->kiov_offset + dst->kiov_len <= CFS_PAGE_SIZE);
-                        return (niov);
-                }
+               if (len <= frag_len) {
+                       dst->kiov_len = len;
+                       LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
+                       return niov;
+               }
 
-                dst->kiov_len = frag_len;
-                LASSERT (dst->kiov_offset + dst->kiov_len <= CFS_PAGE_SIZE);
+               dst->kiov_len = frag_len;
+               LASSERT (dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
 
                 len -= frag_len;
                 dst++;
@@ -907,7 +907,7 @@ lnet_msg2bufpool(lnet_msg_t *msg)
        rbp = &the_lnet.ln_rtrpools[cpt][0];
 
        LASSERT(msg->msg_len <= LNET_MTU);
-       while (msg->msg_len > (unsigned int)rbp->rbp_npages * CFS_PAGE_SIZE) {
+       while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) {
                rbp++;
                LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
        }
@@ -2130,7 +2130,7 @@ LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
                        libcfs_id2str(target));
                 return -ENOMEM;
         }
-        msg->msg_vmflush = !!cfs_memory_pressure_get();
+       msg->msg_vmflush = !!memory_pressure_get();
 
        cpt = lnet_cpt_of_cookie(mdh.cookie);
        lnet_res_lock(cpt);
index 4cef9e8..dd5e9e3 100644 (file)
@@ -1296,7 +1296,7 @@ lnet_destroy_rtrbuf(lnet_rtrbuf_t *rb, int npages)
         int sz = offsetof(lnet_rtrbuf_t, rb_kiov[npages]);
 
         while (--npages >= 0)
-                cfs_free_page(rb->rb_kiov[npages].kiov_page);
+               __free_page(rb->rb_kiov[npages].kiov_page);
 
         LIBCFS_FREE(rb, sz);
 }
@@ -1318,16 +1318,16 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
 
        for (i = 0; i < npages; i++) {
                page = cfs_page_cpt_alloc(lnet_cpt_table(), cpt,
-                                         CFS_ALLOC_ZERO | CFS_ALLOC_STD);
+                                         __GFP_ZERO | GFP_IOFS);
                 if (page == NULL) {
                         while (--i >= 0)
-                                cfs_free_page(rb->rb_kiov[i].kiov_page);
+                               __free_page(rb->rb_kiov[i].kiov_page);
 
                         LIBCFS_FREE(rb, sz);
                         return NULL;
                 }
 
-                rb->rb_kiov[i].kiov_len = CFS_PAGE_SIZE;
+               rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE;
                 rb->rb_kiov[i].kiov_offset = 0;
                 rb->rb_kiov[i].kiov_page = page;
         }
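
The router-buffer hunks above pass kernel GFP flags straight through the CPT-aware allocator (which this patch intentionally leaves in place) and free pages with __free_page() on the unwind path. The same allocate-or-roll-back shape can be sketched with plain alloc_page(); the helper name is illustrative, and GFP_IOFS is assumed to be available, as it is on the older kernels this patch targets (it expands to __GFP_IO | __GFP_FS):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate npages zeroed pages, releasing everything already allocated
 * if any single allocation fails. */
static int alloc_zeroed_pages(struct page **pages, int npages)
{
        int i;

        for (i = 0; i < npages; i++) {
                pages[i] = alloc_page(GFP_IOFS | __GFP_ZERO);
                if (pages[i] == NULL) {
                        while (--i >= 0)
                                __free_page(pages[i]);
                        return -ENOMEM;
                }
        }
        return 0;
}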
@@ -1489,7 +1489,7 @@ int
 lnet_rtrpools_alloc(int im_a_router)
 {
        lnet_rtrbufpool_t *rtrp;
-       int     large_pages = (LNET_MTU + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
+       int     large_pages = (LNET_MTU + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        int     small_pages = 1;
        int     nrb_tiny;
        int     nrb_small;
index d07ba78..d4382b0 100644 (file)
@@ -261,7 +261,7 @@ int LL_PROC_PROTO(proc_lnet_routes)
         if (len > *lenp) {    /* linux-supplied buffer is too small */
                 rc = -EINVAL;
         } else if (len > 0) { /* wrote something */
-                if (cfs_copy_to_user(buffer, tmpstr, len))
+               if (copy_to_user(buffer, tmpstr, len))
                         rc = -EFAULT;
                 else {
                        off += 1;
@@ -397,7 +397,7 @@ int LL_PROC_PROTO(proc_lnet_routers)
         if (len > *lenp) {    /* linux-supplied buffer is too small */
                 rc = -EINVAL;
         } else if (len > 0) { /* wrote something */
-                if (cfs_copy_to_user(buffer, tmpstr, len))
+               if (copy_to_user(buffer, tmpstr, len))
                         rc = -EFAULT;
                 else {
                        off += 1;
@@ -565,7 +565,7 @@ int LL_PROC_PROTO(proc_lnet_peers)
         if (len > *lenp) {    /* linux-supplied buffer is too small */
                 rc = -EINVAL;
         } else if (len > 0) { /* wrote something */
-                if (cfs_copy_to_user(buffer, tmpstr, len))
+               if (copy_to_user(buffer, tmpstr, len))
                         rc = -EFAULT;
                 else
                        *ppos = LNET_PROC_POS_MAKE(cpt, ver, hash, hoff);
@@ -745,7 +745,7 @@ int LL_PROC_PROTO(proc_lnet_nis)
         if (len > *lenp) {    /* linux-supplied buffer is too small */
                 rc = -EINVAL;
         } else if (len > 0) { /* wrote something */
-                if (cfs_copy_to_user(buffer, tmpstr, len))
+               if (copy_to_user(buffer, tmpstr, len))
                         rc = -EFAULT;
                 else
                         *ppos += 1;
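
The proc handlers above now hand their formatted output back with copy_to_user() directly. The contract these conversions rely on is that copy_to_user() (like copy_from_user()) returns the number of bytes it could not copy, so any non-zero result is mapped to -EFAULT. A minimal sketch under that assumption; the struct and helper name are illustrative stand-ins, not types from the patch:

#include <linux/types.h>
#include <linux/uaccess.h>

struct reply_rec {                      /* stand-in for a per-entry result */
        __u64   nid;
        __u32   pid;
};

/* Copy one record out to a user-space buffer; a short copy is -EFAULT. */
static int report_one_rec(struct reply_rec __user *dst,
                          const struct reply_rec *src)
{
        if (copy_to_user(dst, src, sizeof(*src)))
                return -EFAULT;
        return 0;
}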
index e8b94e7..f905c0c 100644 (file)
@@ -87,7 +87,7 @@ brw_client_init (sfw_test_instance_t *tsi)
                npg   = breq->blk_npg;
                /* NB: this is not going to work for variable page size,
                 * but we have to keep it for compatibility */
-               len   = npg * CFS_PAGE_SIZE;
+               len   = npg * PAGE_CACHE_SIZE;
 
        } else {
                test_bulk_req_v1_t  *breq = &tsi->tsi_u.bulk_v1;
@@ -99,7 +99,7 @@ brw_client_init (sfw_test_instance_t *tsi)
                opc   = breq->blk_opc;
                flags = breq->blk_flags;
                len   = breq->blk_len;
-               npg   = (len + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
+               npg   = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        }
 
        if (npg > LNET_MAX_IOV || npg <= 0)
@@ -150,9 +150,9 @@ brw_inject_one_error (void)
 }
 
 void
-brw_fill_page (cfs_page_t *pg, int pattern, __u64 magic)
+brw_fill_page (struct page *pg, int pattern, __u64 magic)
 {
-        char *addr = cfs_page_address(pg);
+       char *addr = page_address(pg);
         int   i;
 
         LASSERT (addr != NULL);
@@ -164,13 +164,13 @@ brw_fill_page (cfs_page_t *pg, int pattern, __u64 magic)
 
         if (pattern == LST_BRW_CHECK_SIMPLE) {
                 memcpy(addr, &magic, BRW_MSIZE);
-                addr += CFS_PAGE_SIZE - BRW_MSIZE;
+               addr += PAGE_CACHE_SIZE - BRW_MSIZE;
                 memcpy(addr, &magic, BRW_MSIZE);
                 return;
         }
 
         if (pattern == LST_BRW_CHECK_FULL) {
-                for (i = 0; i < CFS_PAGE_SIZE / BRW_MSIZE; i++)
+               for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++)
                         memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE);
                 return;
         }
@@ -180,9 +180,9 @@ brw_fill_page (cfs_page_t *pg, int pattern, __u64 magic)
 }
 
 int
-brw_check_page (cfs_page_t *pg, int pattern, __u64 magic)
+brw_check_page (struct page *pg, int pattern, __u64 magic)
 {
-        char  *addr = cfs_page_address(pg);
+       char  *addr = page_address(pg);
         __u64  data = 0; /* make compiler happy */
         int    i;
 
@@ -195,7 +195,7 @@ brw_check_page (cfs_page_t *pg, int pattern, __u64 magic)
                 data = *((__u64 *) addr);
                 if (data != magic) goto bad_data;
 
-                addr += CFS_PAGE_SIZE - BRW_MSIZE;
+               addr += PAGE_CACHE_SIZE - BRW_MSIZE;
                 data = *((__u64 *) addr);
                 if (data != magic) goto bad_data;
 
@@ -203,7 +203,7 @@ brw_check_page (cfs_page_t *pg, int pattern, __u64 magic)
         }
 
         if (pattern == LST_BRW_CHECK_FULL) {
-                for (i = 0; i < CFS_PAGE_SIZE / BRW_MSIZE; i++) {
+               for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) {
                         data = *(((__u64 *) addr) + i);
                         if (data != magic) goto bad_data;
                 }
@@ -223,7 +223,7 @@ void
 brw_fill_bulk (srpc_bulk_t *bk, int pattern, __u64 magic)
 {
         int         i;
-        cfs_page_t *pg;
+       struct page *pg;
 
         for (i = 0; i < bk->bk_niov; i++) {
 #ifdef __KERNEL__
@@ -240,7 +240,7 @@ int
 brw_check_bulk (srpc_bulk_t *bk, int pattern, __u64 magic)
 {
         int         i;
-        cfs_page_t *pg;
+       struct page *pg;
 
         for (i = 0; i < bk->bk_niov; i++) {
 #ifdef __KERNEL__
@@ -283,7 +283,7 @@ brw_client_prep_rpc (sfw_test_unit_t *tsu,
                opc   = breq->blk_opc;
                flags = breq->blk_flags;
                npg   = breq->blk_npg;
-               len   = npg * CFS_PAGE_SIZE;
+               len   = npg * PAGE_CACHE_SIZE;
 
        } else {
                test_bulk_req_v1_t  *breq = &tsi->tsi_u.bulk_v1;
@@ -295,7 +295,7 @@ brw_client_prep_rpc (sfw_test_unit_t *tsu,
                opc   = breq->blk_opc;
                flags = breq->blk_flags;
                len   = breq->blk_len;
-               npg   = (len + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
+               npg   = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        }
 
        rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
@@ -470,10 +470,10 @@ brw_server_handle(struct srpc_server_rpc *rpc)
                        reply->brw_status = EINVAL;
                        return 0;
                }
-               npg = reqst->brw_len >> CFS_PAGE_SHIFT;
+               npg = reqst->brw_len >> PAGE_CACHE_SHIFT;
 
        } else {
-               npg = (reqst->brw_len + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
+               npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        }
 
        replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;
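
brw_server_handle() above uses the round-up idiom that recurs throughout these hunks: add PAGE_CACHE_SIZE - 1 and shift right by PAGE_CACHE_SHIFT to turn a byte length into a page count (PAGE_CACHE_SIZE and PAGE_CACHE_SHIFT equal PAGE_SIZE and PAGE_SHIFT on the kernels targeted here). A small sketch with an illustrative helper name:

#include <linux/pagemap.h>

/* Number of PAGE_CACHE_SIZE pages needed to hold len bytes. */
static inline unsigned int bytes_to_npages(unsigned int len)
{
        return (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
}

/* For example, len == 1 yields 1 page and len == PAGE_CACHE_SIZE + 1 yields 2. */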
index 61c7242..68297bd 100644 (file)
@@ -63,9 +63,8 @@ lst_session_new_ioctl(lstio_session_new_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (cfs_copy_from_user(name,
-                               args->lstio_ses_namep,
-                               args->lstio_ses_nmlen)) {
+       if (copy_from_user(name, args->lstio_ses_namep,
+                          args->lstio_ses_nmlen)) {
                 LIBCFS_FREE(name, args->lstio_ses_nmlen + 1);
                 return -EFAULT;
         }
@@ -137,7 +136,7 @@ lst_debug_ioctl(lstio_debug_args_t *args)
                 if (name == NULL)
                         return -ENOMEM;
 
-                if (cfs_copy_from_user(name, args->lstio_dbg_namep,
+               if (copy_from_user(name, args->lstio_dbg_namep,
                                        args->lstio_dbg_nmlen)) {
                         LIBCFS_FREE(name, args->lstio_dbg_nmlen + 1);
 
@@ -213,9 +212,8 @@ lst_group_add_ioctl(lstio_group_add_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (cfs_copy_from_user(name,
-                               args->lstio_grp_namep,
-                               args->lstio_grp_nmlen)) {
+       if (copy_from_user(name, args->lstio_grp_namep,
+                          args->lstio_grp_nmlen)) {
                 LIBCFS_FREE(name, args->lstio_grp_nmlen);
                 return -EFAULT;
         }
@@ -247,9 +245,8 @@ lst_group_del_ioctl(lstio_group_del_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (cfs_copy_from_user(name,
-                               args->lstio_grp_namep,
-                               args->lstio_grp_nmlen)) {
+       if (copy_from_user(name, args->lstio_grp_namep,
+                          args->lstio_grp_nmlen)) {
                 LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
                 return -EFAULT;
         }
@@ -282,9 +279,8 @@ lst_group_update_ioctl(lstio_group_update_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (cfs_copy_from_user(name,
-                           args->lstio_grp_namep,
-                           args->lstio_grp_nmlen)) {
+       if (copy_from_user(name, args->lstio_grp_namep,
+                          args->lstio_grp_nmlen)) {
                 LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
                 return -EFAULT;
         }
@@ -344,7 +340,7 @@ lst_nodes_add_ioctl(lstio_group_nodes_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (cfs_copy_from_user(name, args->lstio_grp_namep,
+       if (copy_from_user(name, args->lstio_grp_namep,
                                args->lstio_grp_nmlen)) {
                 LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
 
@@ -359,7 +355,7 @@ lst_nodes_add_ioctl(lstio_group_nodes_args_t *args)
 
        LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
        if (rc == 0 &&
-           cfs_copy_to_user(args->lstio_grp_featp, &feats, sizeof(feats))) {
+           copy_to_user(args->lstio_grp_featp, &feats, sizeof(feats))) {
                return -EINVAL;
        }
 
@@ -408,23 +404,22 @@ lst_group_info_ioctl(lstio_group_info_args_t *args)
                     args->lstio_grp_ndentp == NULL) /* # of node entry */
                         return -EINVAL;
 
-                if (cfs_copy_from_user(&ndent, args->lstio_grp_ndentp,
-                                       sizeof(ndent)) ||
-                    cfs_copy_from_user(&index, args->lstio_grp_idxp,
-                                       sizeof(index)))
-                        return -EFAULT;
+               if (copy_from_user(&ndent, args->lstio_grp_ndentp,
+                                  sizeof(ndent)) ||
+                   copy_from_user(&index, args->lstio_grp_idxp,
+                                  sizeof(index)))
+                       return -EFAULT;
 
-                if (ndent <= 0 || index < 0)
-                        return -EINVAL;
-        }
+               if (ndent <= 0 || index < 0)
+                       return -EINVAL;
+       }
 
-        LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
-        if (name == NULL)
-                return -ENOMEM;
+       LIBCFS_ALLOC(name, args->lstio_grp_nmlen + 1);
+       if (name == NULL)
+               return -ENOMEM;
 
-        if (cfs_copy_from_user(name,
-                               args->lstio_grp_namep,
-                               args->lstio_grp_nmlen)) {
+       if (copy_from_user(name, args->lstio_grp_namep,
+                          args->lstio_grp_nmlen)) {
                 LIBCFS_FREE(name, args->lstio_grp_nmlen + 1);
                 return -EFAULT;
         }
@@ -439,12 +434,12 @@ lst_group_info_ioctl(lstio_group_info_args_t *args)
         if (rc != 0) 
                 return rc;
 
-        if (args->lstio_grp_dentsp != NULL && 
-            (cfs_copy_to_user(args->lstio_grp_idxp, &index, sizeof(index)) ||
-             cfs_copy_to_user(args->lstio_grp_ndentp, &ndent, sizeof(ndent))))
-                rc = -EFAULT;
+       if (args->lstio_grp_dentsp != NULL &&
+           (copy_to_user(args->lstio_grp_idxp, &index, sizeof(index)) ||
+            copy_to_user(args->lstio_grp_ndentp, &ndent, sizeof(ndent))))
+               rc = -EFAULT;
 
-        return 0;
+       return 0;
 }
 
 int
@@ -465,12 +460,11 @@ lst_batch_add_ioctl(lstio_batch_add_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (cfs_copy_from_user(name,
-                               args->lstio_bat_namep,
-                               args->lstio_bat_nmlen)) {
-                LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
-                return -EFAULT;
-        }
+       if (copy_from_user(name, args->lstio_bat_namep,
+                          args->lstio_bat_nmlen)) {
+               LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+               return -EFAULT;
+       }
 
         name[args->lstio_bat_nmlen] = 0;
 
@@ -499,12 +493,11 @@ lst_batch_run_ioctl(lstio_batch_run_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (cfs_copy_from_user(name,
-                               args->lstio_bat_namep,
-                               args->lstio_bat_nmlen)) {
-                LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
-                return -EFAULT;
-        }
+       if (copy_from_user(name, args->lstio_bat_namep,
+                          args->lstio_bat_nmlen)) {
+               LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+               return -EFAULT;
+       }
 
         name[args->lstio_bat_nmlen] = 0;
 
@@ -535,12 +528,11 @@ lst_batch_stop_ioctl(lstio_batch_stop_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (cfs_copy_from_user(name,
-                               args->lstio_bat_namep,
-                               args->lstio_bat_nmlen)) {
-                LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
-                return -EFAULT;
-        }
+       if (copy_from_user(name, args->lstio_bat_namep,
+                          args->lstio_bat_nmlen)) {
+               LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+               return -EFAULT;
+       }
 
         name[args->lstio_bat_nmlen] = 0;
 
@@ -574,12 +566,11 @@ lst_batch_query_ioctl(lstio_batch_query_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (cfs_copy_from_user(name,
-                               args->lstio_bat_namep,
-                               args->lstio_bat_nmlen)) {
-                LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
-                return -EFAULT;
-        }
+       if (copy_from_user(name, args->lstio_bat_namep,
+                          args->lstio_bat_nmlen)) {
+               LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
+               return -EFAULT;
+       }
 
         name[args->lstio_bat_nmlen] = 0;
 
@@ -636,9 +627,9 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args)
                     args->lstio_bat_ndentp == NULL) /* # of node entry */
                         return -EINVAL;
 
-                if (cfs_copy_from_user(&index, args->lstio_bat_idxp,
+               if (copy_from_user(&index, args->lstio_bat_idxp,
                                        sizeof(index)) ||
-                    cfs_copy_from_user(&ndent, args->lstio_bat_ndentp,
+                   copy_from_user(&ndent, args->lstio_bat_ndentp,
                                        sizeof(ndent)))
                         return -EFAULT;
 
@@ -650,8 +641,8 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (cfs_copy_from_user(name,
-                               args->lstio_bat_namep, args->lstio_bat_nmlen)) {
+       if (copy_from_user(name, args->lstio_bat_namep,
+                          args->lstio_bat_nmlen)) {
                 LIBCFS_FREE(name, args->lstio_bat_nmlen + 1);
                 return -EFAULT;
         }
@@ -668,12 +659,12 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args)
         if (rc != 0)
                 return rc;
 
-        if (args->lstio_bat_dentsp != NULL && 
-            (cfs_copy_to_user(args->lstio_bat_idxp, &index, sizeof(index)) ||
-             cfs_copy_to_user(args->lstio_bat_ndentp, &ndent, sizeof(ndent))))
-                rc = -EFAULT;
+       if (args->lstio_bat_dentsp != NULL &&
+           (copy_to_user(args->lstio_bat_idxp, &index, sizeof(index)) ||
+            copy_to_user(args->lstio_bat_ndentp, &ndent, sizeof(ndent))))
+               rc = -EFAULT;
 
-        return rc;
+       return rc;
 }
 
 int
@@ -701,7 +692,7 @@ lst_stat_query_ioctl(lstio_stat_args_t *args)
         if (name == NULL)
                 return -ENOMEM;
 
-        if (cfs_copy_from_user(name, args->lstio_sta_namep,
+       if (copy_from_user(name, args->lstio_sta_namep,
                                args->lstio_sta_nmlen)) {
                 LIBCFS_FREE(name, args->lstio_sta_nmlen + 1);
                 return -EFAULT;
@@ -753,7 +744,8 @@ int lst_test_add_ioctl(lstio_test_args_t *args)
         /* have parameter, check if parameter length is valid */
         if (args->lstio_tes_param != NULL &&
             (args->lstio_tes_param_len <= 0 ||
-             args->lstio_tes_param_len > CFS_PAGE_SIZE - sizeof(lstcon_test_t)))
+            args->lstio_tes_param_len >
+            PAGE_CACHE_SIZE - sizeof(lstcon_test_t)))
                 return -EINVAL;
 
         LIBCFS_ALLOC(name, args->lstio_tes_bat_nmlen + 1);
@@ -774,19 +766,16 @@ int lst_test_add_ioctl(lstio_test_args_t *args)
                         goto out;
         }
 
-        rc = -EFAULT;
-        if (cfs_copy_from_user(name,
-                              args->lstio_tes_bat_name,
-                              args->lstio_tes_bat_nmlen) ||
-            cfs_copy_from_user(srcgrp,
-                              args->lstio_tes_sgrp_name,
-                              args->lstio_tes_sgrp_nmlen) ||
-            cfs_copy_from_user(dstgrp,
-                              args->lstio_tes_dgrp_name,
-                              args->lstio_tes_dgrp_nmlen) ||
-            cfs_copy_from_user(param, args->lstio_tes_param,
-                              args->lstio_tes_param_len))
-                goto out;
+       rc = -EFAULT;
+       if (copy_from_user(name, args->lstio_tes_bat_name,
+                          args->lstio_tes_bat_nmlen) ||
+           copy_from_user(srcgrp, args->lstio_tes_sgrp_name,
+                          args->lstio_tes_sgrp_nmlen) ||
+           copy_from_user(dstgrp, args->lstio_tes_dgrp_name,
+                          args->lstio_tes_dgrp_nmlen) ||
+           copy_from_user(param, args->lstio_tes_param,
+                             args->lstio_tes_param_len))
+               goto out;
 
         rc = lstcon_test_add(name,
                             args->lstio_tes_type,
@@ -797,7 +786,7 @@ int lst_test_add_ioctl(lstio_test_args_t *args)
                             &ret, args->lstio_tes_resultp);
 
         if (ret != 0)
-                rc = (cfs_copy_to_user(args->lstio_tes_retp, &ret,
+               rc = (copy_to_user(args->lstio_tes_retp, &ret,
                                        sizeof(ret))) ? -EFAULT : 0;
 out:
         if (name != NULL)
@@ -818,25 +807,25 @@ out:
 int
 lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data)
 {
-        char   *buf;
-        int     opc = data->ioc_u32[0];
-        int     rc;
+       char   *buf;
+       int     opc = data->ioc_u32[0];
+       int     rc;
 
-        if (cmd != IOC_LIBCFS_LNETST)
-                return -EINVAL;
+       if (cmd != IOC_LIBCFS_LNETST)
+               return -EINVAL;
 
-        if (data->ioc_plen1 > CFS_PAGE_SIZE)
-                return -EINVAL;
+       if (data->ioc_plen1 > PAGE_CACHE_SIZE)
+               return -EINVAL;
 
-        LIBCFS_ALLOC(buf, data->ioc_plen1);
-        if (buf == NULL)
-                return -ENOMEM;
+       LIBCFS_ALLOC(buf, data->ioc_plen1);
+       if (buf == NULL)
+               return -ENOMEM;
 
-        /* copy in parameter */
-        if (cfs_copy_from_user(buf, data->ioc_pbuf1, data->ioc_plen1)) {
-                LIBCFS_FREE(buf, data->ioc_plen1);
-                return -EFAULT;
-        }
+       /* copy in parameter */
+       if (copy_from_user(buf, data->ioc_pbuf1, data->ioc_plen1)) {
+               LIBCFS_FREE(buf, data->ioc_plen1);
+               return -EFAULT;
+       }
 
        mutex_lock(&console_session.ses_mutex);
 
@@ -918,15 +907,15 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data)
                         rc = -EINVAL;
         }
 
-        if (cfs_copy_to_user(data->ioc_pbuf2, &console_session.ses_trans_stat,
-                             sizeof(lstcon_trans_stat_t)))
-                rc = -EFAULT;
+       if (copy_to_user(data->ioc_pbuf2, &console_session.ses_trans_stat,
+                        sizeof(lstcon_trans_stat_t)))
+               rc = -EFAULT;
 out:
        mutex_unlock(&console_session.ses_mutex);
 
-        LIBCFS_FREE(buf, data->ioc_plen1);
+       LIBCFS_FREE(buf, data->ioc_plen1);
 
-        return rc;
+       return rc;
 }
 
 EXPORT_SYMBOL(lstcon_ioctl_entry);
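
lstcon_ioctl_entry() above shows the ioctl ingestion pattern after the conversion: reject user buffers larger than PAGE_CACHE_SIZE, allocate a kernel buffer, copy_from_user() into it, and free the buffer again if the copy comes up short. A stripped-down sketch of that shape, in which kzalloc()/kfree() merely stand in for the LIBCFS_ALLOC()/LIBCFS_FREE() macros and the helper name is hypothetical:

#include <linux/err.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/* Copy a bounded, caller-sized parameter block in from user space. */
static void *copy_in_params(const void __user *ubuf, size_t len)
{
        void *buf;

        if (len == 0 || len > PAGE_CACHE_SIZE)
                return ERR_PTR(-EINVAL);

        buf = kzalloc(len, GFP_KERNEL);
        if (buf == NULL)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(buf, ubuf, len)) {
                kfree(buf);
                return ERR_PTR(-EFAULT);
        }

        return buf;
}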
index 665a9a5..856caf7 100644 (file)
@@ -160,7 +160,7 @@ lstcon_rpc_put(lstcon_rpc_t *crpc)
                 if (bulk->bk_iovs[i].kiov_page == NULL)
                         continue;
 
-                cfs_free_page(bulk->bk_iovs[i].kiov_page);
+               __free_page(bulk->bk_iovs[i].kiov_page);
         }
 
         srpc_client_rpc_decref(crpc->crp_rpc);
@@ -494,7 +494,7 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
 
         cfs_list_for_each_entry_typed(crpc, &trans->tas_rpcs_list,
                                       lstcon_rpc_t, crp_link) {
-                if (cfs_copy_from_user(&tmp, next,
+               if (copy_from_user(&tmp, next,
                                        sizeof(cfs_list_t)))
                         return -EFAULT;
 
@@ -515,35 +515,36 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
                       (cfs_time_t)console_session.ses_id.ses_stamp);
                 cfs_duration_usec(dur, &tv);
 
-                if (cfs_copy_to_user(&ent->rpe_peer,
-                                     &nd->nd_id, sizeof(lnet_process_id_t)) ||
-                    cfs_copy_to_user(&ent->rpe_stamp, &tv, sizeof(tv)) ||
-                    cfs_copy_to_user(&ent->rpe_state,
-                                     &nd->nd_state, sizeof(nd->nd_state)) ||
-                    cfs_copy_to_user(&ent->rpe_rpc_errno, &error,
-                                     sizeof(error)))
-                        return -EFAULT;
+               if (copy_to_user(&ent->rpe_peer,
+                                &nd->nd_id, sizeof(lnet_process_id_t)) ||
+                   copy_to_user(&ent->rpe_stamp, &tv, sizeof(tv)) ||
+                   copy_to_user(&ent->rpe_state,
+                                &nd->nd_state, sizeof(nd->nd_state)) ||
+                   copy_to_user(&ent->rpe_rpc_errno, &error,
+                                    sizeof(error)))
+                       return -EFAULT;
 
-                if (error != 0)
-                        continue;
+               if (error != 0)
+                       continue;
 
-                /* RPC is done */
-                rep = (srpc_generic_reply_t *)&msg->msg_body.reply;
+               /* RPC is done */
+               rep = (srpc_generic_reply_t *)&msg->msg_body.reply;
 
-                if (cfs_copy_to_user(&ent->rpe_sid,
-                                     &rep->sid, sizeof(lst_sid_t)) ||
-                    cfs_copy_to_user(&ent->rpe_fwk_errno,
-                                     &rep->status, sizeof(rep->status)))
-                        return -EFAULT;
+               if (copy_to_user(&ent->rpe_sid,
+                                &rep->sid, sizeof(lst_sid_t)) ||
+                   copy_to_user(&ent->rpe_fwk_errno,
+                                &rep->status, sizeof(rep->status)))
+                       return -EFAULT;
 
-                if (readent == NULL)
-                        continue;
+               if (readent == NULL)
+                       continue;
 
-                if ((error = readent(trans->tas_opc, msg, ent)) != 0)
-                        return error;
-        }
+               error = readent(trans->tas_opc, msg, ent);
+               if (error != 0)
+                       return error;
+       }
 
-        return 0;
+       return 0;
 }
 
 void
@@ -720,7 +721,7 @@ lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov)
         
         LASSERT (i < nkiov);
 
-        pid = (lnet_process_id_packed_t *)cfs_page_address(kiov[i].kiov_page);
+       pid = (lnet_process_id_packed_t *)page_address(kiov[i].kiov_page);
 
         return &pid[idx % SFW_ID_PER_PAGE];
 }
@@ -797,11 +798,12 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
 {
        test_bulk_req_t *brq = &req->tsr_u.bulk_v0;
 
-        brq->blk_opc    = param->blk_opc;
-        brq->blk_npg    = (param->blk_size + CFS_PAGE_SIZE - 1) / CFS_PAGE_SIZE;
-        brq->blk_flags  = param->blk_flags;
+       brq->blk_opc    = param->blk_opc;
+       brq->blk_npg    = (param->blk_size + PAGE_CACHE_SIZE - 1) /
+                          PAGE_CACHE_SIZE;
+       brq->blk_flags  = param->blk_flags;
 
-        return 0;
+       return 0;
 }
 
 int
@@ -833,7 +835,7 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
        if (transop == LST_TRANS_TSBCLIADD) {
                npg = sfw_id_pages(test->tes_span);
                nob = (feats & LST_FEAT_BULK_LEN) == 0 ?
-                     npg * CFS_PAGE_SIZE :
+                     npg * PAGE_CACHE_SIZE :
                      sizeof(lnet_process_id_packed_t) * test->tes_span;
        }
 
@@ -860,13 +862,13 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
                        LASSERT(nob > 0);
 
                        len = (feats & LST_FEAT_BULK_LEN) == 0 ?
-                             CFS_PAGE_SIZE : min_t(int, nob, CFS_PAGE_SIZE);
+                             PAGE_CACHE_SIZE : min_t(int, nob, PAGE_CACHE_SIZE);
                        nob -= len;
 
                        bulk->bk_iovs[i].kiov_offset = 0;
                        bulk->bk_iovs[i].kiov_len    = len;
                        bulk->bk_iovs[i].kiov_page   =
-                               cfs_alloc_page(CFS_ALLOC_STD);
+                               alloc_page(GFP_IOFS);
 
                        if (bulk->bk_iovs[i].kiov_page == NULL) {
                                lstcon_rpc_put(*crpc);
index 04c1a38..9689506 100644 (file)
@@ -392,9 +392,9 @@ lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,
         case LST_TRANS_SESQRY:
                 rep = &msg->msg_body.dbg_reply;
 
-                if (cfs_copy_to_user(&ent_up->rpe_priv[0],
+               if (copy_to_user(&ent_up->rpe_priv[0],
                                      &rep->dbg_timeout, sizeof(int)) ||
-                    cfs_copy_to_user(&ent_up->rpe_payload[0],
+                   copy_to_user(&ent_up->rpe_payload[0],
                                      &rep->dbg_name, LST_NAME_SIZE))
                         return -EFAULT;
 
@@ -426,7 +426,7 @@ lstcon_group_nodes_add(lstcon_group_t *grp,
         }
 
         for (i = 0 ; i < count; i++) {
-                if (cfs_copy_from_user(&id, &ids_up[i], sizeof(id))) {
+               if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
                         rc = -EFAULT;
                         break;
                 }
@@ -495,7 +495,7 @@ lstcon_group_nodes_remove(lstcon_group_t *grp,
         }
 
         for (i = 0; i < count; i++) {
-                if (cfs_copy_from_user(&id, &ids_up[i], sizeof(id))) {
+               if (copy_from_user(&id, &ids_up[i], sizeof(id))) {
                         rc = -EFAULT;
                         goto error;
                 }
@@ -740,7 +740,7 @@ lstcon_group_list(int index, int len, char *name_up)
         cfs_list_for_each_entry_typed(grp, &console_session.ses_grp_list,
                                       lstcon_group_t, grp_link) {
                 if (index-- == 0) {
-                        return cfs_copy_to_user(name_up, grp->grp_name, len) ?
+                       return copy_to_user(name_up, grp->grp_name, len) ?
                                -EFAULT : 0;
                 }
         }
@@ -770,9 +770,9 @@ lstcon_nodes_getent(cfs_list_t *head, int *index_p,
                         break;
 
                 nd = ndl->ndl_node;
-                if (cfs_copy_to_user(&dents_up[count].nde_id,
+               if (copy_to_user(&dents_up[count].nde_id,
                                      &nd->nd_id, sizeof(nd->nd_id)) ||
-                    cfs_copy_to_user(&dents_up[count].nde_state,
+                   copy_to_user(&dents_up[count].nde_state,
                          &n