Whamcloud - gitweb
LU-1346 libcfs: replace libcfs wrappers with kernel API
authorAndreas Dilger <adilger@whamcloud.com>
Tue, 4 Dec 2012 20:44:31 +0000 (13:44 -0700)
committerOleg Drokin <green@whamcloud.com>
Wed, 5 Dec 2012 13:48:49 +0000 (08:48 -0500)
The libcfs kernel portability library had wrappers for many low-level
kernel functions (locking, bit operations, etc) that were simple
wrappers around Linux kernel functions.  This provides no value for
Linux clients and clients for other kernels are not under development.

Remove the cfs_ prefix from these simple wrapper functions.  For other
kernels, they will need to use the Linux kernel API for portability.

Affected primitives:
spinlock_t, spin_lock_init, spin_lock, spin_unlock, spin_lock_bh,
spin_lock_bh_init, spin_unlock_bh, spin_trylock, spin_is_locked,
spin_lock_irq, spin_unlock_irq, read_lock_irqsave, write_lock_irqsave,
read_lock_irqrestore, write_lock_irqrestore, spin_lock_irqsave,
spin_unlock_irqrestore, SPIN_LOCK_UNLOCKED

rw_semaphore, init_rwsem, down_read, down_read_trylock, up_read,
down_write, down_write_trylock, up_write, fini_rwsem, DECLARE_RWSEM

semaphore, rw_semaphore, init_completion_module, call_wait_handler,
wait_handler_t, mt_completion_t, mt_init_completion,
mt_wait_for_completion, mt_complete, mt_fini_completion, mt_atomic_t,
mt_atomic_read, mt_atomic_set, mt_atomic_dec_and_test, mt_atomic_inc,
mt_atomic_dec, mt_atomic_add, mt_atomic_sub

rw_lock_t, rwlock_init, read_lock, read_unlock,
read_unlock_irqrestore, write_lock, write_unlock, write_lock_bh,
write_unlock_bh, RW_LOCK_UNLOCKED

completion_t, DECLARE_COMPLETION, INIT_COMPLETION, complete,
COMPLETION_INITIALIZER, init_completion, wait_for_completion,
wait_for_completion_interruptible, complete_and_exit, fini_completion

semaphore_t, DEFINE_SEMAPHORE, sema_init, up, down,
down_interruptible, down_trylock

mutex_t, DEFINE_MUTEX, mutex_init, mutex_lock, mutex_unlock,
mutex_lock_interruptible, mutex_trylock, mutex_is_locked,
mutex_destroy

lock_kernel, unlock_kernel

lock_class_key, lock_class_key_t, lockdep_set_class, lockdep_off,
lockdep_on, mutex_lock_nested, spin_lock_nested, down_read_nested,
down_write_nested

test_bit, set_bit, clear_bit, test_and_set_bit, test_and_clear_bit,
find_first_bit, find_first_zero_bit, find_next_bit,
find_next_zero_bit, ffz, ffs, fls

Change-Id: I36db204c703ed414504eaa9ba22e97ad7eb6cc2c
Signed-off-by: Liu Xuezhao <xuezhao.liu@emc.com>
Signed-off-by: Oleg Drokin <green@whamcloud.com>
Reviewed-on: http://review.whamcloud.com/2829
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
319 files changed:
build/libcfs_cleanup.sed [new file with mode: 0644]
libcfs/include/libcfs/bitmap.h
libcfs/include/libcfs/darwin/darwin-lock.h
libcfs/include/libcfs/libcfs_hash.h
libcfs/include/libcfs/libcfs_private.h
libcfs/include/libcfs/linux/linux-bitops.h
libcfs/include/libcfs/linux/linux-lock.h
libcfs/include/libcfs/linux/portals_compat25.h
libcfs/include/libcfs/lucache.h
libcfs/include/libcfs/params_tree.h
libcfs/include/libcfs/user-bitops.h
libcfs/include/libcfs/user-lock.h
libcfs/include/libcfs/winnt/portals_utils.h
libcfs/include/libcfs/winnt/winnt-fs.h
libcfs/include/libcfs/winnt/winnt-lock.h
libcfs/include/libcfs/winnt/winnt-mem.h
libcfs/include/libcfs/winnt/winnt-prim.h
libcfs/include/libcfs/winnt/winnt-tcpip.h
libcfs/libcfs/darwin/darwin-mem.c
libcfs/libcfs/fail.c
libcfs/libcfs/hash.c
libcfs/libcfs/kernel_user_comm.c
libcfs/libcfs/libcfs_lock.c
libcfs/libcfs/linux/linux-lwt.c
libcfs/libcfs/linux/linux-prim.c
libcfs/libcfs/linux/linux-tracefile.c
libcfs/libcfs/module.c
libcfs/libcfs/nidstrings.c
libcfs/libcfs/tracefile.c
libcfs/libcfs/tracefile.h
libcfs/libcfs/upcall_cache.c
libcfs/libcfs/user-bitops.c
libcfs/libcfs/user-lock.c
libcfs/libcfs/user-prim.c
libcfs/libcfs/watchdog.c
libcfs/libcfs/winnt/winnt-curproc.c
libcfs/libcfs/winnt/winnt-lock.c
libcfs/libcfs/winnt/winnt-mem.c
libcfs/libcfs/winnt/winnt-prim.c
libcfs/libcfs/winnt/winnt-proc.c
libcfs/libcfs/winnt/winnt-sync.c
libcfs/libcfs/winnt/winnt-tcpip.c
libcfs/libcfs/winnt/winnt-tracefile.c
libcfs/libcfs/workitem.c
lnet/include/lnet/lib-lnet.h
lnet/include/lnet/lib-types.h
lnet/klnds/mxlnd/mxlnd.c
lnet/klnds/mxlnd/mxlnd.h
lnet/klnds/mxlnd/mxlnd_cb.c
lnet/klnds/o2iblnd/o2iblnd.c
lnet/klnds/o2iblnd/o2iblnd.h
lnet/klnds/o2iblnd/o2iblnd_cb.c
lnet/klnds/ptllnd/ptllnd.c
lnet/klnds/ptllnd/ptllnd.h
lnet/klnds/ptllnd/ptllnd_cb.c
lnet/klnds/ptllnd/ptllnd_peer.c
lnet/klnds/ptllnd/ptllnd_rx_buf.c
lnet/klnds/ptllnd/ptllnd_tx.c
lnet/klnds/qswlnd/qswlnd.c
lnet/klnds/qswlnd/qswlnd.h
lnet/klnds/qswlnd/qswlnd_cb.c
lnet/klnds/ralnd/ralnd.c
lnet/klnds/ralnd/ralnd.h
lnet/klnds/ralnd/ralnd_cb.c
lnet/klnds/socklnd/socklnd.c
lnet/klnds/socklnd/socklnd.h
lnet/klnds/socklnd/socklnd_cb.c
lnet/klnds/socklnd/socklnd_lib-linux.c
lnet/klnds/socklnd/socklnd_lib-winnt.c
lnet/klnds/socklnd/socklnd_proto.c
lnet/lnet/acceptor.c
lnet/lnet/api-ni.c
lnet/lnet/config.c
lnet/lnet/lib-ptl.c
lnet/lnet/module.c
lnet/lnet/router.c
lnet/selftest/conctl.c
lnet/selftest/conrpc.c
lnet/selftest/console.c
lnet/selftest/console.h
lnet/selftest/framework.c
lnet/selftest/ping_test.c
lnet/selftest/rpc.c
lnet/selftest/selftest.h
lnet/selftest/timer.c
lnet/ulnds/socklnd/conn.c
lnet/ulnds/socklnd/poll.c
lnet/ulnds/socklnd/usocklnd.c
lnet/ulnds/socklnd/usocklnd.h
lustre/fid/fid_handler.c
lustre/fid/fid_request.c
lustre/fid/lproc_fid.c
lustre/fld/fld_cache.c
lustre/fld/fld_handler.c
lustre/fld/fld_internal.h
lustre/fld/fld_request.c
lustre/fld/lproc_fld.c
lustre/include/cl_object.h
lustre/include/dt_object.h
lustre/include/lclient.h
lustre/include/liblustre.h
lustre/include/linux/lustre_compat25.h
lustre/include/linux/lustre_fsfilt.h
lustre/include/linux/lustre_patchless_compat.h
lustre/include/linux/obd.h
lustre/include/lprocfs_status.h
lustre/include/lu_object.h
lustre/include/lu_ref.h
lustre/include/lu_target.h
lustre/include/lustre_capa.h
lustre/include/lustre_dlm.h
lustre/include/lustre_export.h
lustre/include/lustre_fid.h
lustre/include/lustre_fld.h
lustre/include/lustre_handles.h
lustre/include/lustre_idmap.h
lustre/include/lustre_import.h
lustre/include/lustre_lib.h
lustre/include/lustre_lite.h
lustre/include/lustre_log.h
lustre/include/lustre_mdc.h
lustre/include/lustre_net.h
lustre/include/lustre_sec.h
lustre/include/md_object.h
lustre/include/obd.h
lustre/include/obd_class.h
lustre/lclient/lcommon_cl.c
lustre/lclient/lcommon_misc.c
lustre/ldlm/l_lock.c
lustre/ldlm/ldlm_flock.c
lustre/ldlm/ldlm_internal.h
lustre/ldlm/ldlm_lib.c
lustre/ldlm/ldlm_lock.c
lustre/ldlm/ldlm_lockd.c
lustre/ldlm/ldlm_pool.c
lustre/ldlm/ldlm_request.c
lustre/ldlm/ldlm_resource.c
lustre/llite/dcache.c
lustre/llite/dir.c
lustre/llite/file.c
lustre/llite/llite_capa.c
lustre/llite/llite_close.c
lustre/llite/llite_internal.h
lustre/llite/llite_lib.c
lustre/llite/llite_mmap.c
lustre/llite/llite_nfs.c
lustre/llite/llite_rmtacl.c
lustre/llite/lloop.c
lustre/llite/lproc_llite.c
lustre/llite/remote_perm.c
lustre/llite/rw.c
lustre/llite/statahead.c
lustre/llite/vvp_dev.c
lustre/llite/xattr.c
lustre/lmv/lmv_internal.h
lustre/lmv/lmv_obd.c
lustre/lmv/lmv_object.c
lustre/lmv/lproc_lmv.c
lustre/lod/lod_dev.c
lustre/lod/lod_internal.h
lustre/lod/lod_lov.c
lustre/lod/lod_pool.c
lustre/lod/lod_qos.c
lustre/lod/lproc_lod.c
lustre/lov/lov_cl_internal.h
lustre/lov/lov_dev.c
lustre/lov/lov_ea.c
lustre/lov/lov_internal.h
lustre/lov/lov_io.c
lustre/lov/lov_obd.c
lustre/lov/lov_object.c
lustre/lov/lov_pack.c
lustre/lov/lov_pool.c
lustre/lov/lov_request.c
lustre/lov/lovsub_object.c
lustre/lvfs/fsfilt_ext3.c
lustre/lvfs/lvfs_lib.c
lustre/lvfs/lvfs_linux.c
lustre/mdc/mdc_locks.c
lustre/mdc/mdc_request.c
lustre/mdd/mdd_device.c
lustre/mdd/mdd_dir.c
lustre/mdd/mdd_internal.h
lustre/mdd/mdd_lfsck.c
lustre/mdd/mdd_lock.c
lustre/mdd/mdd_lproc.c
lustre/mdd/mdd_object.c
lustre/mdt/mdt_capa.c
lustre/mdt/mdt_handler.c
lustre/mdt/mdt_identity.c
lustre/mdt/mdt_idmap.c
lustre/mdt/mdt_internal.h
lustre/mdt/mdt_lib.c
lustre/mdt/mdt_lproc.c
lustre/mdt/mdt_open.c
lustre/mdt/mdt_recovery.c
lustre/mdt/mdt_reint.c
lustre/mgc/mgc_request.c
lustre/mgs/lproc_mgs.c
lustre/mgs/mgs_handler.c
lustre/mgs/mgs_internal.h
lustre/mgs/mgs_llog.c
lustre/mgs/mgs_nids.c
lustre/obdclass/capa.c
lustre/obdclass/cl_io.c
lustre/obdclass/cl_lock.c
lustre/obdclass/cl_object.c
lustre/obdclass/cl_page.c
lustre/obdclass/class_obd.c
lustre/obdclass/genops.c
lustre/obdclass/idmap.c
lustre/obdclass/linux/linux-module.c
lustre/obdclass/linux/linux-obdo.c
lustre/obdclass/llog.c
lustre/obdclass/llog_cat.c
lustre/obdclass/llog_internal.h
lustre/obdclass/llog_ioctl.c
lustre/obdclass/llog_lvfs.c
lustre/obdclass/llog_obd.c
lustre/obdclass/llog_osd.c
lustre/obdclass/local_storage.c
lustre/obdclass/local_storage.h
lustre/obdclass/lprocfs_jobstats.c
lustre/obdclass/lprocfs_status.c
lustre/obdclass/lu_object.c
lustre/obdclass/lu_ref.c
lustre/obdclass/lustre_handles.c
lustre/obdclass/lustre_peer.c
lustre/obdclass/md_local_object.c
lustre/obdclass/obd_config.c
lustre/obdclass/obd_mount.c
lustre/obdecho/echo.c
lustre/obdecho/echo_client.c
lustre/ofd/lproc_ofd.c
lustre/ofd/ofd_capa.c
lustre/ofd/ofd_dev.c
lustre/ofd/ofd_fmd.c
lustre/ofd/ofd_fs.c
lustre/ofd/ofd_grant.c
lustre/ofd/ofd_internal.h
lustre/ofd/ofd_obd.c
lustre/ofd/ofd_objects.c
lustre/ofd/ofd_trans.c
lustre/osc/osc_cache.c
lustre/osc/osc_cl_internal.h
lustre/osc/osc_dev.c
lustre/osc/osc_internal.h
lustre/osc/osc_io.c
lustre/osc/osc_lock.c
lustre/osc/osc_object.c
lustre/osc/osc_page.c
lustre/osc/osc_request.c
lustre/osd-ldiskfs/osd_compat.c
lustre/osd-ldiskfs/osd_handler.c
lustre/osd-ldiskfs/osd_iam.c
lustre/osd-ldiskfs/osd_iam.h
lustre/osd-ldiskfs/osd_internal.h
lustre/osd-ldiskfs/osd_io.c
lustre/osd-ldiskfs/osd_lproc.c
lustre/osd-ldiskfs/osd_oi.c
lustre/osd-ldiskfs/osd_scrub.c
lustre/osd-ldiskfs/osd_scrub.h
lustre/osd-zfs/osd_handler.c
lustre/osd-zfs/osd_internal.h
lustre/osd-zfs/osd_io.c
lustre/osd-zfs/osd_object.c
lustre/osd-zfs/osd_xattr.c
lustre/osd-zfs/udmu.c
lustre/osd-zfs/udmu.h
lustre/osp/osp_dev.c
lustre/osp/osp_internal.h
lustre/osp/osp_object.c
lustre/osp/osp_precreate.c
lustre/osp/osp_sync.c
lustre/ost/ost_handler.c
lustre/ptlrpc/client.c
lustre/ptlrpc/events.c
lustre/ptlrpc/gss/gss_cli_upcall.c
lustre/ptlrpc/gss/gss_internal.h
lustre/ptlrpc/gss/gss_keyring.c
lustre/ptlrpc/gss/gss_krb5_mech.c
lustre/ptlrpc/gss/gss_mech_switch.c
lustre/ptlrpc/gss/gss_pipefs.c
lustre/ptlrpc/gss/gss_svc_upcall.c
lustre/ptlrpc/gss/lproc_gss.c
lustre/ptlrpc/gss/sec_gss.c
lustre/ptlrpc/import.c
lustre/ptlrpc/llog_client.c
lustre/ptlrpc/llog_net.c
lustre/ptlrpc/lproc_ptlrpc.c
lustre/ptlrpc/niobuf.c
lustre/ptlrpc/pack_generic.c
lustre/ptlrpc/pinger.c
lustre/ptlrpc/ptlrpc_module.c
lustre/ptlrpc/ptlrpcd.c
lustre/ptlrpc/recov_thread.c
lustre/ptlrpc/recover.c
lustre/ptlrpc/sec.c
lustre/ptlrpc/sec_bulk.c
lustre/ptlrpc/sec_config.c
lustre/ptlrpc/sec_gc.c
lustre/ptlrpc/sec_null.c
lustre/ptlrpc/sec_plain.c
lustre/ptlrpc/service.c
lustre/quota/lquota_internal.h
lustre/quota/qmt_dev.c
lustre/quota/qmt_entry.c
lustre/quota/qmt_internal.h
lustre/quota/qmt_lock.c
lustre/quota/qsd_config.c
lustre/quota/qsd_entry.c
lustre/quota/qsd_handler.c
lustre/quota/qsd_internal.h
lustre/quota/qsd_lib.c
lustre/quota/qsd_lock.c
lustre/quota/qsd_reint.c
lustre/quota/qsd_writeback.c
lustre/target/tgt_lastrcvd.c
lustre/target/tgt_main.c

diff --git a/build/libcfs_cleanup.sed b/build/libcfs_cleanup.sed
new file mode 100644 (file)
index 0000000..8b11c19
--- /dev/null
@@ -0,0 +1,361 @@
+#!/bin/sed -f
+
+# Script to cleanup libcfs macros, it runs against the tree at build time.
+# Migrate libcfs to emulate Linux kernel APIs.
+# http://jira.whamcloud.com/browse/LU-1346
+
+# remove extra blank line
+# /^$/{N;/^\n$/D}
+
+################################################################################
+# lock - spinlock, rw_semaphore, rwlock, completion, semaphore, mutex
+#      - lock_kernel, unlock_kernel, lockdep
+
+# spinlock: rename cfs_spin_* wrappers; delete leftover identity #define redirects
+/typedef  *spinlock_t  *cfs_spinlock_t;/d
+s/\bcfs_spinlock_t\b/spinlock_t/g
+s/\bcfs_spin_lock_init\b/spin_lock_init/g
+/#[ \t]*define[ \t]*\bspin_lock_init\b *( *\w* *)[ \t]*\bspin_lock_init\b *( *\w* *)/d
+s/\bcfs_spin_lock\b/spin_lock/g
+/#[ \t]*define[ \t]*\bspin_lock\b *( *\w* *)[ \t]*\bspin_lock\b *( *\w* *)/d
+s/\bcfs_spin_lock_bh\b/spin_lock_bh/g
+/#[ \t]*define[ \t]*\bspin_lock_bh\b *( *\w* *)[ \t]*\bspin_lock_bh\b *( *\w* *)/d
+s/\bcfs_spin_lock_bh_init\b/spin_lock_bh_init/g
+/#[ \t]*define[ \t]*\bspin_lock_bh_init\b *( *\w* *)[ \t]*\bspin_lock_bh_init\b *( *\w* *)/d
+s/\bcfs_spin_unlock\b/spin_unlock/g
+/#[ \t]*define[ \t]*\bspin_unlock\b *( *\w* *)[ \t]*\bspin_unlock\b *( *\w* *)/d
+s/\bcfs_spin_unlock_bh\b/spin_unlock_bh/g
+/#[ \t]*define[ \t]*\bspin_unlock_bh\b *( *\w* *)[ \t]*\bspin_unlock_bh\b *( *\w* *)/d
+s/\bcfs_spin_trylock\b/spin_trylock/g
+/#[ \t]*define[ \t]*\bspin_trylock\b *( *\w* *)[ \t]*\bspin_trylock\b *( *\w* *)/d
+s/\bcfs_spin_is_locked\b/spin_is_locked/g
+/#[ \t]*define[ \t]*\bspin_is_locked\b *( *\w* *)[ \t]*\bspin_is_locked\b *( *\w* *)/d
+
+s/\bcfs_spin_lock_irq\b/spin_lock_irq/g
+/#[ \t]*define[ \t]*\bspin_lock_irq\b *( *\w* *)[ \t]*\bspin_lock_irq\b *( *\w* *)/d
+s/\bcfs_spin_unlock_irq\b/spin_unlock_irq/g
+/#[ \t]*define[ \t]*\bspin_unlock_irq\b *( *\w* *)[ \t]*\bspin_unlock_irq\b *( *\w* *)/d
+s/\bcfs_read_lock_irqsave\b/read_lock_irqsave/g
+/#[ \t]*define[ \t]*\bread_lock_irqsave\b *( *\w* *, *\w* *)[ \t]*\bread_lock_irqsave\b *( *\w* *, *\w* *)/d
+s/\bcfs_write_lock_irqsave\b/write_lock_irqsave/g
+/#[ \t]*define[ \t]*\bwrite_lock_irqsave\b *( *\w* *, *\w* *)[ \t]*\bwrite_lock_irqsave\b *( *\w* *, *\w* *)/d
+s/\bcfs_write_unlock_irqrestore\b/write_unlock_irqrestore/g
+/#[ \t]*define[ \t]*\bwrite_unlock_irqrestore\b *( *\w* *, *\w* *)[ \t]*\bwrite_unlock_irqrestore\b *( *\w* *, *\w* *)/d
+s/\bcfs_spin_lock_irqsave\b/spin_lock_irqsave/g
+/#[ \t]*define[ \t]*\bspin_lock_irqsave\b *( *\w* *, *\w* *)[ \t]*\bspin_lock_irqsave\b *( *\w* *, *\w* *)/d
+s/\bcfs_spin_unlock_irqrestore\b/spin_unlock_irqrestore/g
+/#[ \t]*define[ \t]*\bspin_unlock_irqrestore\b *( *\w* *, *\w* *)[ \t]*\bspin_unlock_irqrestore\b *( *\w* *, *\w* *)/d
+s/\bCFS_SPIN_LOCK_UNLOCKED\b/SPIN_LOCK_UNLOCKED/g
+/#[ \t]*define[ \t]*\bSPIN_LOCK_UNLOCKED\b[ \t]*\bSPIN_LOCK_UNLOCKED\b/d
+
+# rw_semaphore (plus mt_* and wait-handler names below, which only drop the cfs_ prefix)
+s/\bcfs_rw_semaphore_t\b/struct rw_semaphore/g
+s/\bcfs_init_rwsem\b/init_rwsem/g
+/#[ \t]*define[ \t]*\binit_rwsem\b *( *\w* *)[ \t]*\binit_rwsem\b *( *\w* *)/d
+s/\bcfs_down_read\b/down_read/g
+/#[ \t]*define[ \t]*\bdown_read\b *( *\w* *)[ \t]*\bdown_read\b *( *\w* *)/d
+s/\bcfs_down_read_trylock\b/down_read_trylock/g
+/#[ \t]*define[ \t]*\bdown_read_trylock\b *( *\w* *)[ \t]*\bdown_read_trylock\b *( *\w* *)/d
+s/\bcfs_up_read\b/up_read/g
+/#[ \t]*define[ \t]*\bup_read\b *( *\w* *)[ \t]*\bup_read\b *( *\w* *)/d
+s/\bcfs_down_write\b/down_write/g
+/#[ \t]*define[ \t]*\bdown_write\b *( *\w* *)[ \t]*\bdown_write\b *( *\w* *)/d
+s/\bcfs_down_write_trylock\b/down_write_trylock/g
+/#[ \t]*define[ \t]*\bdown_write_trylock\b *( *\w* *)[ \t]*\bdown_write_trylock\b *( *\w* *)/d
+s/\bcfs_up_write\b/up_write/g
+/#[ \t]*define[ \t]*\bup_write\b *( *\w* *)[ \t]*\bup_write\b *( *\w* *)/d
+s/\bcfs_fini_rwsem\b/fini_rwsem/g
+s/\bCFS_DECLARE_RWSEM\b/DECLARE_RWSEM/g
+/#[ \t]*define[ \t]*\bDECLARE_RWSEM\b *( *\w* *)[ \t]*\bDECLARE_RWSEM\b *( *\w* *)/d
+
+s/\bcfs_semaphore\b/semaphore/g
+s/\bcfs_rw_semaphore\b/rw_semaphore/g
+s/\bcfs_init_completion_module\b/init_completion_module/g
+s/\bcfs_call_wait_handler\b/call_wait_handler/g
+s/\bcfs_wait_handler_t\b/wait_handler_t/g
+s/\bcfs_mt_completion_t\b/mt_completion_t/g
+s/\bcfs_mt_init_completion\b/mt_init_completion/g
+s/\bcfs_mt_wait_for_completion\b/mt_wait_for_completion/g
+s/\bcfs_mt_complete\b/mt_complete/g
+s/\bcfs_mt_fini_completion\b/mt_fini_completion/g
+s/\bcfs_mt_atomic_t\b/mt_atomic_t/g
+s/\bcfs_mt_atomic_read\b/mt_atomic_read/g
+s/\bcfs_mt_atomic_set\b/mt_atomic_set/g
+s/\bcfs_mt_atomic_dec_and_test\b/mt_atomic_dec_and_test/g
+s/\bcfs_mt_atomic_inc\b/mt_atomic_inc/g
+s/\bcfs_mt_atomic_dec\b/mt_atomic_dec/g
+s/\bcfs_mt_atomic_add\b/mt_atomic_add/g
+s/\bcfs_mt_atomic_sub\b/mt_atomic_sub/g
+
+# rwlock: rename cfs_rwlock_*; a macro continued over two lines is removed with {N;d}
+/typedef  *rwlock_t  *cfs_rwlock_t;/d
+s/\bcfs_rwlock_t\b/rwlock_t/g
+s/\bcfs_rwlock_init\b/rwlock_init/g
+/#[ \t]*define[ \t]*\brwlock_init\b *( *\w* *)[ \t]*\brwlock_init\b *( *\w* *)/d
+s/\bcfs_read_lock\b/read_lock/g
+/#[ \t]*define[ \t]*\bread_lock\b *( *\w* *)[ \t]*\bread_lock\b *( *\w* *)/d
+s/\bcfs_read_unlock\b/read_unlock/g
+/#[ \t]*define[ \t]*\bread_unlock\b *( *\w* *)[ \t]*\bread_unlock\b *( *\w* *)/d
+s/\bcfs_read_unlock_irqrestore\b/read_unlock_irqrestore/g
+#/#[ \t]*define[ \t]*\bread_unlock_irqrestore\b *( *\w* *)[ \t]*\bread_unlock_irqrestore\b *( *\w* *)/d
+/#define read_unlock_irqrestore(lock,flags) \\/{N;d}
+s/\bcfs_write_lock\b/write_lock/g
+/#[ \t]*define[ \t]*\bwrite_lock\b *( *\w* *)[ \t]*\bwrite_lock\b *( *\w* *)/d
+s/\bcfs_write_unlock\b/write_unlock/g
+/#[ \t]*define[ \t]*\bwrite_unlock\b *( *\w* *)[ \t]*\bwrite_unlock\b *( *\w* *)/d
+s/\bcfs_write_lock_bh\b/write_lock_bh/g
+/#[ \t]*define[ \t]*\bwrite_lock_bh\b *( *\w* *)[ \t]*\bwrite_lock_bh\b *( *\w* *)/d
+s/\bcfs_write_unlock_bh\b/write_unlock_bh/g
+/#[ \t]*define[ \t]*\bwrite_unlock_bh\b *( *\w* *)[ \t]*\bwrite_unlock_bh\b *( *\w* *)/d
+s/\bCFS_RW_LOCK_UNLOCKED\b/RW_LOCK_UNLOCKED/g
+/#[ \t]*define[ \t]*\bRW_LOCK_UNLOCKED\b  *\bRW_LOCK_UNLOCKED\b */d
+
+# completion: cfs_completion_t -> struct completion; the two-line interruptible macro needs {N;d}
+s/\bcfs_completion_t\b/struct completion/g
+s/\bCFS_DECLARE_COMPLETION\b/DECLARE_COMPLETION/g
+/#[ \t]*define[ \t]*\bDECLARE_COMPLETION\b *( *\w* *)[ \t]*\bDECLARE_COMPLETION\b *( *\w* *)/d
+s/\bCFS_INIT_COMPLETION\b/INIT_COMPLETION/g
+/#[ \t]*define[ \t]*\bINIT_COMPLETION\b *( *\w* *)[ \t]*\bINIT_COMPLETION\b *( *\w* *)/d
+s/\bCFS_COMPLETION_INITIALIZER\b/COMPLETION_INITIALIZER/g
+/#[ \t]*define[ \t]*\bCOMPLETION_INITIALIZER\b *( *\w* *)[ \t]*\bCOMPLETION_INITIALIZER\b *( *\w* *)/d
+s/\bcfs_init_completion\b/init_completion/g
+/#[ \t]*define[ \t]*\binit_completion\b *( *\w* *)[ \t]*\binit_completion\b *( *\w* *)/d
+s/\bcfs_complete\b/complete/g
+/#[ \t]*define[ \t]*\bcomplete\b *( *\w* *)[ \t]*\bcomplete\b *( *\w* *)/d
+s/\bcfs_wait_for_completion\b/wait_for_completion/g
+/#[ \t]*define[ \t]*\bwait_for_completion\b *( *\w* *)[ \t]*\bwait_for_completion\b *( *\w* *)/d
+s/\bcfs_wait_for_completion_interruptible\b/wait_for_completion_interruptible/g
+/#define wait_for_completion_interruptible(c) \\/{N;d}
+s/\bcfs_complete_and_exit\b/complete_and_exit/g
+/#[ \t]*define[ \t]*\bcomplete_and_exit\b *( *\w* *, *\w* *)[ \t]*\bcomplete_and_exit\b *( *\w* *, *\w* *)/d
+s/\bcfs_fini_completion\b/fini_completion/g
+
+# semaphore: cfs_semaphore_t -> struct semaphore; delete identity #define redirects
+s/\bcfs_semaphore_t\b/struct semaphore/g
+s/\bCFS_DEFINE_SEMAPHORE\b/DEFINE_SEMAPHORE/g
+/#[ \t]*define[ \t]*\bDEFINE_SEMAPHORE\b *( *\w* *)[ \t]*\bDEFINE_SEMAPHORE\b *( *\w* *)/d
+s/\bcfs_sema_init\b/sema_init/g
+/#[ \t]*define[ \t]*\bsema_init\b *( *\w* *, *\w* *)[ \t]*\bsema_init\b *( *\w* *, *\w* *)/d
+s/\bcfs_up\b/up/g
+/#[ \t]*define[ \t]*\bup\b *( *\w* *)[ \t]*\bup\b *( *\w* *)/d
+s/\bcfs_down\b/down/g
+/#[ \t]*define[ \t]*\bdown\b *( *\w* *)[ \t]*\bdown\b *( *\w* *)/d
+s/\bcfs_down_interruptible\b/down_interruptible/g
+/#[ \t]*define[ \t]*\bdown_interruptible\b *( *\w* *)[ \t]*\bdown_interruptible\b *( *\w* *)/d
+s/\bcfs_down_trylock\b/down_trylock/g
+/#[ \t]*define[ \t]*\bdown_trylock\b *( *\w* *)[ \t]*\bdown_trylock\b *( *\w* *)/d
+
+# mutex: rename cfs_mutex_* and delete the identity "#define x(...) x(...)" redirects
+s/\bcfs_mutex_t\b/struct mutex/g
+s/\bCFS_DEFINE_MUTEX\b/DEFINE_MUTEX/g
+/#[ \t]*define[ \t]*\bDEFINE_MUTEX\b *( *\w* *)[ \t]*\bDEFINE_MUTEX\b *( *\w* *)/d
+s/\bcfs_mutex_init\b/mutex_init/g
+/#[ \t]*define[ \t]*\bmutex_init\b *( *\w* *)[ \t]*\bmutex_init\b *( *\w* *)/d
+s/\bcfs_mutex_lock\b/mutex_lock/g
+/#[ \t]*define[ \t]*\bmutex_lock\b *( *\w* *)[ \t]*\bmutex_lock\b *( *\w* *)/d
+s/\bcfs_mutex_unlock\b/mutex_unlock/g
+/#[ \t]*define[ \t]*\bmutex_unlock\b *( *\w* *)[ \t]*\bmutex_unlock\b *( *\w* *)/d
+s/\bcfs_mutex_lock_interruptible\b/mutex_lock_interruptible/g
+/#[ \t]*define[ \t]*\bmutex_lock_interruptible\b *( *\w* *)[ \t]*\bmutex_lock_interruptible\b *( *\w* *)/d
+s/\bcfs_mutex_trylock\b/mutex_trylock/g
+/#[ \t]*define[ \t]*\bmutex_trylock\b *( *\w* *)[ \t]*\bmutex_trylock\b *( *\w* *)/d
+s/\bcfs_mutex_is_locked\b/mutex_is_locked/g
+/#[ \t]*define[ \t]*\bmutex_is_locked\b *( *\w* *)[ \t]*\bmutex_is_locked\b *( *\w* *)/d
+s/\bcfs_mutex_destroy\b/mutex_destroy/g
+/#[ \t]*define[ \t]*\bmutex_destroy\b *( *\w* *)[ \t]*\bmutex_destroy\b *( *\w* *)/d
+
+# lock_kernel, unlock_kernel: conversion intentionally left disabled (rules commented out)
+# s/\bcfs_lock_kernel\b/lock_kernel/g
+# /#[ \t]*define[ \t]*\block_kernel\b *( *)[ \t]*\block_kernel\b *( *)/d
+# s/\bcfs_unlock_kernel\b/unlock_kernel/g
+# /#[ \t]*define[ \t]*\bunlock_kernel\b *( *)[ \t]*\bunlock_kernel\b *( *)/d
+
+# lockdep: rename cfs_lockdep_* and the *_nested lock variants
+s/\bcfs_lock_class_key\b/lock_class_key/g
+s/\bcfs_lock_class_key_t\b/struct lock_class_key/g
+s/\bcfs_lockdep_set_class\b/lockdep_set_class/g
+s/\bcfs_lockdep_off\b/lockdep_off/g
+s/\bcfs_lockdep_on\b/lockdep_on/g
+/#[ \t]*define[ \t]*\blockdep_off\b *( *)[ \t]*\blockdep_off\b *( *)/d
+/#[ \t]*define[ \t]*\blockdep_on\b *( *)[ \t]*\blockdep_on\b *( *)/d
+/#[ \t]*define[ \t]*\blockdep_set_class\b *( *\w* *, *\w* *)[ \t]*\blockdep_set_class\b *( *\w* *, *\w* *)/d
+
+s/\bcfs_mutex_lock_nested\b/mutex_lock_nested/g
+#/#[ \t]*define[ \t]*\bmutex_lock_nested\b *( *\w* *, *\w* *)[ \t]*\bmutex_lock_nested\b *( *\w* *, *\w* *)/d
+/#define mutex_lock_nested(mutex, subclass) \\/{N;d}
+s/\bcfs_spin_lock_nested\b/spin_lock_nested/g
+/#[ \t]*define[ \t]*\bspin_lock_nested\b *( *\w* *, *\w* *)[ \t]*\bspin_lock_nested\b *( *\w* *, *\w* *)/d
+s/\bcfs_down_read_nested\b/down_read_nested/g
+/#[ \t]*define[ \t]*\bdown_read_nested\b *( *\w* *, *\w* *)[ \t]*\bdown_read_nested\b *( *\w* *, *\w* *)/d
+s/\bcfs_down_write_nested\b/down_write_nested/g
+/#[ \t]*define[ \t]*\bdown_write_nested\b *( *\w* *, *\w* *)[ \t]*\bdown_write_nested\b *( *\w* *, *\w* *)/d
+
+###############################################################################
+# bitops: rename cfs_* bit helpers; find_next_zero_bit's two-line macro uses {N;d}
+
+s/\bcfs_test_bit\b/test_bit/g
+/#[ \t]*define[ \t]*\btest_bit\b *( *\w* *, *\w* *)[ \t]*\btest_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_set_bit\b/set_bit/g
+/#[ \t]*define[ \t]*\bset_bit\b *( *\w* *, *\w* *)[ \t]*\bset_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_clear_bit\b/clear_bit/g
+/#[ \t]*define[ \t]*\bclear_bit\b *( *\w* *, *\w* *)[ \t]*\bclear_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_test_and_set_bit\b/test_and_set_bit/g
+/#[ \t]*define[ \t]*\btest_and_set_bit\b *( *\w* *, *\w* *)[ \t]*\btest_and_set_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_test_and_clear_bit\b/test_and_clear_bit/g
+/#[ \t]*define[ \t]*\btest_and_clear_bit\b *( *\w* *, *\w* *)[ \t]*\btest_and_clear_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_find_first_bit\b/find_first_bit/g
+/#[ \t]*define[ \t]*\bfind_first_bit\b *( *\w* *, *\w* *)[ \t]*\bfind_first_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_find_first_zero_bit\b/find_first_zero_bit/g
+/#[ \t]*define[ \t]*\bfind_first_zero_bit\b *( *\w* *, *\w* *)[ \t]*\bfind_first_zero_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_find_next_bit\b/find_next_bit/g
+/#[ \t]*define[ \t]*\bfind_next_bit\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bfind_next_bit\b *( *\w* *, *\w* *, *\w* *)/d
+s/\bcfs_find_next_zero_bit\b/find_next_zero_bit/g
+/#define find_next_zero_bit(addr, size, off) \\/{N;d}
+s/\bcfs_ffz\b/ffz/g
+/#[ \t]*define[ \t]*\bffz\b *( *\w* *)[ \t]*\bffz\b *( *\w* *)/d
+s/\bcfs_ffs\b/ffs/g
+/#[ \t]*define[ \t]*\bffs\b *( *\w* *)[ \t]*\bffs\b *( *\w* *)/d
+s/\bcfs_fls\b/fls/g
+/#[ \t]*define[ \t]*\bfls\b *( *\w* *)[ \t]*\bfls\b *( *\w* *)/d
+
+################################################################################
+# file operations
+
+#s/\bcfs_file_t\b/file_t/g
+#s/\bcfs_dentry_t\b/dentry_t/g
+#s/\bcfs_dirent_t\b/dirent_t/g
+#s/\bcfs_kstatfs_t\b/kstatfs_t/g
+#s/\bcfs_filp_size\b/filp_size/g
+#s/\bcfs_filp_poff\b/filp_poff/g
+#s/\bcfs_filp_open\b/filp_open/g
+#/#[ \t]*define[ \t]*\bfilp_open\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bfilp_open\b *( *\w* *, *\w* *, *\w* *)/d
+#s/\bcfs_do_fsync\b/do_fsync/g
+#s/\bcfs_filp_close\b/filp_close/g
+#/#[ \t]*define[ \t]*\bfilp_close\b *( *\w* *, *\w* *)[ \t]*\bfilp_close\b *( *\w* *, *\w* *)/d
+#s/\bcfs_filp_read\b/filp_read/g
+#s/\bcfs_filp_write\b/filp_write/g
+#s/\bcfs_filp_fsync\b/filp_fsync/g
+#s/\bcfs_get_file\b/get_file/g
+#/#[ \t]*define[ \t]*\bget_file\b *( *\w* *)[ \t]*\bget_file\b *( *\w* *)/d
+#s/\bcfs_get_fd\b/fget/g
+#/#[ \t]*define[ \t]*\bfget\b *( *\w* *)[ \t]*\bfget\b *( *\w* *)/d
+#s/\bcfs_put_file\b/fput/g
+#/#[ \t]*define[ \t]*\bfput\b *( *\w* *)[ \t]*\bfput\b *( *\w* *)/d
+#s/\bcfs_file_count\b/file_count/g
+#/#[ \t]*define[ \t]*\bfile_count\b *( *\w* *)[ \t]*\bfile_count\b *( *\w* *)/d
+#s/\bCFS_INT_LIMIT\b/INT_LIMIT/g
+#s/\bCFS_OFFSET_MAX\b/OFFSET_MAX/g
+#s/\bcfs_flock_t\b/flock_t/g
+#s/\bcfs_flock_type\b/flock_type/g
+#s/\bcfs_flock_set_type\b/flock_set_type/g
+#s/\bcfs_flock_pid\b/flock_pid/g
+#s/\bcfs_flock_set_pid\b/flock_set_pid/g
+#s/\bcfs_flock_start\b/flock_start/g
+#s/\bcfs_flock_set_start\b/flock_set_start/g
+#s/\bcfs_flock_end\b/flock_end/g
+#s/\bcfs_flock_set_end\b/flock_set_end/g
+#s/\bcfs_user_write\b/user_write/g
+#s/\bCFS_IFSHIFT\b/IFSHIFT/g
+#s/\bCFS_IFTODT\b/IFTODT/g
+#s/\bCFS_DTTOIF\b/DTTOIF/g
+
+################################################################################
+# memory operations
+
+#s/\bcfs_page_t\b/page_t/g
+#s/\bCFS_PAGE_SIZE\b/PAGE_CACHE_SIZE/g
+#/#[ \t]*define[ \t]*\bPAGE_CACHE_SIZE\b[ \t]*\bPAGE_CACHE_SIZE\b/d
+#s/\bCFS_PAGE_SHIFT\b/PAGE_CACHE_SHIFT/g
+#/#[ \t]*define[ \t]*\bPAGE_CACHE_SHIFT\b[ \t]*\bPAGE_CACHE_SHIFT\b/d
+#s/\bCFS_PAGE_MASK\b/PAGE_CACHE_MASK/g
+#/#[ \t]*define[ \t]*\bPAGE_CACHE_MASK\b[ \t]*\bPAGE_CACHE_MASK\b/d
+#s/\bcfs_num_physpages\b/num_physpages/g
+#/#[ \t]*define[ \t]*\bnum_physpages\b[ \t]*\bnum_physpages\b/d
+#s/\bcfs_copy_from_user\b/copy_from_user/g
+#/#[ \t]*define[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)/d
+#s/\bcfs_copy_to_user\b/copy_to_user/g
+#/#[ \t]*define[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)/d
+#s/\bcfs_page_address\b/page_address/g
+#/#[ \t]*define[ \t]*\bpage_address\b *( *\w* *)[ \t]*\bpage_address\b *( *\w* *)/d
+#s/\bcfs_kmap\b/kmap/g
+#/#[ \t]*define[ \t]*\bkmap\b *( *\w* *)[ \t]*\bkmap\b *( *\w* *)/d
+#s/\bcfs_kunmap\b/kunmap/g
+#/#[ \t]*define[ \t]*\bkunmap\b *( *\w* *)[ \t]*\bkunmap\b *( *\w* *)/d
+#s/\bcfs_get_page\b/get_page/g
+#/#[ \t]*define[ \t]*\bget_page\b *( *\w* *)[ \t]*\bget_page\b *( *\w* *)/d
+#s/\bcfs_page_count\b/page_count/g
+#/#[ \t]*define[ \t]*\bpage_count\b *( *\w* *)[ \t]*\bpage_count\b *( *\w* *)/d
+#s/\bcfs_page_index\b/page_index/g
+#/#[ \t]*define[ \t]*\bpage_index\b *( *\w* *)[ \t]*\bpage_index\b *( *\w* *)/d
+#s/\bcfs_page_pin\b/page_cache_get/g
+#/#[ \t]*define[ \t]*\bpage_cache_get\b *( *\w* *)[ \t]*\bpage_cache_get\b *( *\w* *)/d
+#s/\bcfs_page_unpin\b/page_cache_release/g
+#/#[ \t]*define[ \t]*\bpage_cache_release\b *( *\w* *)[ \t]*\bpage_cache_release\b *( *\w* *)/d
+#s/\bcfs_memory_pressure_get\b/memory_pressure_get/g
+#s/\bcfs_memory_pressure_set\b/memory_pressure_set/g
+#s/\bcfs_memory_pressure_clr\b/memory_pressure_clr/g
+#s/\bCFS_NUM_CACHEPAGES\b/NUM_CACHEPAGES/g
+# memory allocator
+#s/\bCFS_ALLOC_ATOMIC\b/GFP_ATOMIC/g
+#/#[ \t]*define[ \t]*\bGFP_ATOMIC\b[ \t]*\bGFP_ATOMIC\b/d
+#s/\bCFS_ALLOC_WAIT\b/__GFP_WAIT/g
+#/#[ \t]*define[ \t]*\b__GFP_WAIT\b[ \t]*\b__GFP_WAIT\b/d
+#s/\bCFS_ALLOC_ZERO\b/__GFP_ZERO/g
+#/#[ \t]*define[ \t]*\b__GFP_ZERO\b[ \t]*\b__GFP_ZERO\b/d
+#s/\bCFS_ALLOC_FS\b/__GFP_FS/g
+#/#[ \t]*define[ \t]*\b__GFP_FS\b[ \t]*\b__GFP_FS\b/d
+#s/\bCFS_ALLOC_IO\b/__GFP_IO/g
+#/#[ \t]*define[ \t]*\b__GFP_IO\b[ \t]*\b__GFP_IO\b/d
+#s/\bCFS_ALLOC_NOWARN\b/__GFP_NOWARN/g
+#/#[ \t]*define[ \t]*\b__GFP_NOWARN\b[ \t]*\b__GFP_NOWARN\b/d
+#s/\bCFS_ALLOC_STD\b/GFP_IOFS/g
+#/#[ \t]*define[ \t]*\bGFP_IOFS\b[ \t]*\bGFP_IOFS\b/d
+#s/\bCFS_ALLOC_USER\b/GFP_KERNEL/g
+#/#[ \t]*define[ \t]*\bGFP_KERNEL\b[ \t]*\bGFP_KERNEL\b/d
+#s/\bCFS_ALLOC_HIGHMEM\b/__GFP_HIGHMEM/g
+#/#[ \t]*define[ \t]*\b__GFP_HIGHMEM\b[ \t]*\b__GFP_HIGHMEM\b/d
+#s/\bCFS_ALLOC_HIGHUSER\b/GFP_HIGHUSER/g
+#/#[ \t]*define[ \t]*\bGFP_HIGHUSER\b[ \t]*\bGFP_HIGHUSER\b/d
+#s/\bCFS_ALLOC_ATOMIC_TRY\b/ALLOC_ATOMIC_TRY/g
+#s/\bcfs_alloc\b/kmalloc/g
+#/#[ \t]*define[ \t]*\bkmalloc\b *( *\w* *, *\w* *)[ \t]*\bkmalloc\b *( *\w* *, *\w* *)/d
+#s/\bcfs_free\b/kfree/g
+#/#[ \t]*define[ \t]*\bkfree\b *( *\w* *)[ \t]*\bkfree\b *( *\w* *)/d
+#s/\bcfs_alloc_large\b/vmalloc/g
+#/#[ \t]*define[ \t]*\bvmalloc\b *( *\w* *)[ \t]*\bvmalloc\b *( *\w* *)/d
+#s/\bcfs_free_large\b/vfree/g
+#/#[ \t]*define[ \t]*\bvfree\b *( *\w* *)[ \t]*\bvfree\b *( *\w* *)/d
+#s/\bcfs_alloc_page\b/alloc_page/g
+#/#[ \t]*define[ \t]*\balloc_page\b *( *\w* *)[ \t]*\balloc_page\b *( *\w* *)/d
+#s/\bcfs_free_page\b/__free_page/g
+#/#[ \t]*define[ \t]*\b__free_page\b *( *\w* *)[ \t]*\b__free_page\b *( *\w* *)/d
+# TODO: SLAB allocator
+#s/\bCFS_DECL_MMSPACE\b/DECL_MMSPACE/g
+#s/\bCFS_MMSPACE_OPEN\b/MMSPACE_OPEN/g
+#s/\bCFS_MMSPACE_CLOSE\b/MMSPACE_CLOSE/g
+#s/\bCFS_SLAB_HWCACHE_ALIGN\b/SLAB_HWCACHE_ALIGN/g
+#/#[ \t]*define[ \t]*\bSLAB_HWCACHE_ALIGN\b[ \t]*\bSLAB_HWCACHE_ALIGN\b/d
+#s/\bCFS_SLAB_KERNEL\b/SLAB_KERNEL/g
+#/#[ \t]*define[ \t]*\bSLAB_KERNEL\b[ \t]*\bSLAB_KERNEL\b/d
+#s/\bCFS_SLAB_NOFS\b/SLAB_NOFS/g
+#/#[ \t]*define[ \t]*\bSLAB_NOFS\b[ \t]*\bSLAB_NOFS\b/d
+#s/\bcfs_shrinker\b/shrinker/g
+#/#[ \t]*define[ \t]*\bshrinker\b[ \t]*\bshrinker\b/d
+#s/\bcfs_shrinker_t\b/shrinker_t/g
+#/typedef[ \t]*\bshrinker_t\b[ \t]*\bshrinker_t\b/d
+#s/\bcfs_set_shrinker\b/set_shrinker/g
+#/#[ \t]*define[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)/d
+#s/\bcfs_remove_shrinker\b/remove_shrinker/g
+#/#[ \t]*define[ \t]*\bremove_shrinker\b *( *\w* *)[ \t]*\bremove_shrinker\b *( *\w* *)/d
+#s/\bCFS_DEFAULT_SEEKS\b/DEFAULT_SEEKS/g
+#/#[ \t]*define[ \t]*\bDEFAULT_SEEKS\b[ \t]*\bDEFAULT_SEEKS\b/d
+
+
+#s/\bcfs_\b//g
+#s/\bCFS_\b//g
+#/typedef[ \t]*\b\b[ \t]*\b\b/d
+#/#[ \t]*define[ \t]*\b\b[ \t]*\b\b/d
+#/#[ \t]*define[ \t]*\b\b *( *)[ \t]*\b\b *( *)/d
+#/#[ \t]*define[ \t]*\b\b *( *\w* *)[ \t]*\b\b *( *\w* *)/d
+#/#[ \t]*define[ \t]*\b\b *( *\w* *, *\w* *)[ \t]*\b\b *( *\w* *, *\w* *)/d
+#/#[ \t]*define[ \t]*\b\b *( *\w* *, *\w* *, *\w* *)[ \t]*\b\b *( *\w* *, *\w* *, *\w* *)/d
index 5436f3c..5991ccd 100644 (file)
@@ -62,32 +62,32 @@ cfs_bitmap_t *CFS_ALLOCATE_BITMAP(int size)
 static inline
 void cfs_bitmap_set(cfs_bitmap_t *bitmap, int nbit)
 {
-        cfs_set_bit(nbit, bitmap->data);
+       set_bit(nbit, bitmap->data);
 }
 
 static inline
 void cfs_bitmap_clear(cfs_bitmap_t *bitmap, int nbit)
 {
-        cfs_test_and_clear_bit(nbit, bitmap->data);
+       test_and_clear_bit(nbit, bitmap->data);
 }
 
 static inline
 int cfs_bitmap_check(cfs_bitmap_t *bitmap, int nbit)
 {
-        return cfs_test_bit(nbit, bitmap->data);
+       return test_bit(nbit, bitmap->data);
 }
 
 static inline
 int cfs_bitmap_test_and_clear(cfs_bitmap_t *bitmap, int nbit)
 {
-        return cfs_test_and_clear_bit(nbit, bitmap->data);
+       return test_and_clear_bit(nbit, bitmap->data);
 }
 
 /* return 0 is bitmap has none set bits */
 static inline
 int cfs_bitmap_check_empty(cfs_bitmap_t *bitmap)
 {
-        return cfs_find_first_bit(bitmap->data, bitmap->size) == bitmap->size;
+       return find_first_bit(bitmap->data, bitmap->size) == bitmap->size;
 }
 
 static inline
@@ -101,9 +101,9 @@ void cfs_bitmap_copy(cfs_bitmap_t *new, cfs_bitmap_t *old)
        new->size = newsize;
 }
 
-#define cfs_foreach_bit(bitmap, pos) \
-       for((pos)=cfs_find_first_bit((bitmap)->data, bitmap->size);   \
-           (pos) < (bitmap)->size;                               \
-           (pos) = cfs_find_next_bit((bitmap)->data, (bitmap)->size, (pos)+1))
+#define cfs_foreach_bit(bitmap, pos)                                   \
+       for ((pos) = find_first_bit((bitmap)->data, bitmap->size);      \
+            (pos) < (bitmap)->size;                                    \
+            (pos) = find_next_bit((bitmap)->data, (bitmap)->size, (pos) + 1))
 
 #endif
index 95203c9..8033c0f 100644 (file)
 
 /*
  * spin_lock (use Linux kernel's primitives)
- * 
+ *
  * - spin_lock_init(x)
  * - spin_lock(x)
  * - spin_unlock(x)
  * - spin_trylock(x)
- * 
+ *
  * - spin_lock_irqsave(x, f)
  * - spin_unlock_irqrestore(x, f)
  */
index ca785b8..47f3408 100644 (file)
@@ -131,8 +131,8 @@ struct cfs_hash_lock_ops;
 struct cfs_hash_hlist_ops;
 
 typedef union {
-        cfs_rwlock_t                rw;             /**< rwlock */
-        cfs_spinlock_t              spin;           /**< spinlock */
+       rwlock_t                rw;             /**< rwlock */
+       spinlock_t              spin;           /**< spinlock */
 } cfs_hash_lock_t;
 
 /**
@@ -307,7 +307,7 @@ typedef struct cfs_hash {
         cfs_hash_bucket_t         **hs_rehash_buckets;
 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
         /** serialize debug members */
-        cfs_spinlock_t              hs_dep_lock;
+       spinlock_t                      hs_dep_lock;
         /** max depth */
         unsigned int                hs_dep_max;
         /** id of the deepest bucket */
index aaaac7c..5c55887 100644 (file)
@@ -470,7 +470,7 @@ struct cfs_percpt_lock {
        /* exclusively locked */
        unsigned int            pcl_locked;
        /* private lock table */
-       cfs_spinlock_t          **pcl_locks;
+       spinlock_t              **pcl_locks;
 };
 
 /* return number of private locks */
index 229ce4c..d625bc6 100644 (file)
  */
 #include <linux/bitops.h>
 
-#define cfs_test_bit(nr, addr)              test_bit(nr, addr)
-#define cfs_set_bit(nr, addr)               set_bit(nr, addr)
-#define cfs_clear_bit(nr, addr)             clear_bit(nr, addr)
-#define cfs_test_and_set_bit(nr, addr)      test_and_set_bit(nr, addr)
-#define cfs_test_and_clear_bit(nr, addr)    test_and_clear_bit(nr, addr)
-#define cfs_find_first_bit(addr, size)      find_first_bit(addr, size)
-#define cfs_find_first_zero_bit(addr, size) find_first_zero_bit(addr, size)
-#define cfs_find_next_bit(addr, size, off)  find_next_bit(addr, size, off)
-#define cfs_find_next_zero_bit(addr, size, off) \
-        find_next_zero_bit(addr, size, off)
 
-#define cfs_ffz(x)                          ffz(x)
-#define cfs_ffs(x)                          ffs(x)
-#define cfs_fls(x)                          fls(x)
index cc2ca51..943459f 100644 (file)
  * spinlock "implementation"
  */
 
-typedef spinlock_t cfs_spinlock_t;
-
-#define cfs_spin_lock_init(lock)             spin_lock_init(lock)
-#define cfs_spin_lock(lock)                  spin_lock(lock)
-#define cfs_spin_lock_bh(lock)               spin_lock_bh(lock)
-#define cfs_spin_lock_bh_init(lock)          spin_lock_bh_init(lock)
-#define cfs_spin_unlock(lock)                spin_unlock(lock)
-#define cfs_spin_unlock_bh(lock)             spin_unlock_bh(lock)
-#define cfs_spin_trylock(lock)               spin_trylock(lock)
-#define cfs_spin_is_locked(lock)             spin_is_locked(lock)
-
-#define cfs_spin_lock_irq(lock)              spin_lock_irq(lock)
-#define cfs_spin_unlock_irq(lock)            spin_unlock_irq(lock)
-#define cfs_read_lock_irqsave(lock, f)       read_lock_irqsave(lock, f)
-#define cfs_write_lock_irqsave(lock, f)      write_lock_irqsave(lock, f)
-#define cfs_write_unlock_irqrestore(lock, f) write_unlock_irqrestore(lock, f)
-#define cfs_spin_lock_irqsave(lock, f)       spin_lock_irqsave(lock, f)
-#define cfs_spin_unlock_irqrestore(lock, f)  spin_unlock_irqrestore(lock, f)
+
+
 
 /*
  * rw_semaphore "implementation" (use Linux kernel's primitives)
@@ -110,19 +94,10 @@ typedef spinlock_t cfs_spinlock_t;
  * - down_write(x)
  * - up_write(x)
  */
-typedef struct rw_semaphore cfs_rw_semaphore_t;
 
-#define cfs_init_rwsem(s)         init_rwsem(s)
-#define cfs_down_read(s)          down_read(s)
-#define cfs_down_read_trylock(s)  down_read_trylock(s)
-#define cfs_up_read(s)            up_read(s)
-#define cfs_down_write(s)         down_write(s)
-#define cfs_down_write_trylock(s) down_write_trylock(s)
-#define cfs_up_write(s)           up_write(s)
 
-#define cfs_fini_rwsem(s)         do {} while(0)
+#define fini_rwsem(s)          do {} while (0)
 
-#define CFS_DECLARE_RWSEM(name)   DECLARE_RWSEM(name)
 
 /*
  * rwlock_t "implementation" (use Linux kernel's primitives)
@@ -137,17 +112,7 @@ typedef struct rw_semaphore cfs_rw_semaphore_t;
  *
  * - RW_LOCK_UNLOCKED
  */
-typedef rwlock_t cfs_rwlock_t;
-
-#define cfs_rwlock_init(lock)                  rwlock_init(lock)
-#define cfs_read_lock(lock)                    read_lock(lock)
-#define cfs_read_unlock(lock)                  read_unlock(lock)
-#define cfs_read_unlock_irqrestore(lock,flags) \
-        read_unlock_irqrestore(lock, flags)
-#define cfs_write_lock(lock)                   write_lock(lock)
-#define cfs_write_unlock(lock)                 write_unlock(lock)
-#define cfs_write_lock_bh(lock)                write_lock_bh(lock)
-#define cfs_write_unlock_bh(lock)              write_unlock_bh(lock)
+
 
 #ifndef DEFINE_RWLOCK
 #define DEFINE_RWLOCK(lock)    rwlock_t lock = __RW_LOCK_UNLOCKED(lock)
@@ -165,18 +130,7 @@ typedef rwlock_t cfs_rwlock_t;
  * - wait_for_completion_interruptible(c)
  * - fini_completion(c)
  */
-typedef struct completion cfs_completion_t;
-
-#define CFS_DECLARE_COMPLETION(work)             DECLARE_COMPLETION(work)
-#define CFS_INIT_COMPLETION(c)                   INIT_COMPLETION(c)
-#define CFS_COMPLETION_INITIALIZER(work)         COMPLETION_INITIALIZER(work)
-#define cfs_init_completion(c)                   init_completion(c)
-#define cfs_complete(c)                          complete(c)
-#define cfs_wait_for_completion(c)               wait_for_completion(c)
-#define cfs_wait_for_completion_interruptible(c) \
-        wait_for_completion_interruptible(c)
-#define cfs_complete_and_exit(c, code)           complete_and_exit(c, code)
-#define cfs_fini_completion(c)                   do { } while (0)
+#define fini_completion(c) do { } while (0)
 
 /*
  * semaphore "implementation" (use Linux kernel's primitives)
@@ -187,19 +141,6 @@ typedef struct completion cfs_completion_t;
  * - down_interruptible(sem)
  * - down_trylock(sem)
  */
-typedef struct semaphore      cfs_semaphore_t;
-
-#ifdef DEFINE_SEMAPHORE
-#define CFS_DEFINE_SEMAPHORE(name)          DEFINE_SEMAPHORE(name)
-#else
-#define CFS_DEFINE_SEMAPHORE(name)          DECLARE_MUTEX(name)
-#endif
-
-#define cfs_sema_init(sem, val)             sema_init(sem, val)
-#define cfs_up(x)                           up(x)
-#define cfs_down(x)                         down(x)
-#define cfs_down_interruptible(x)           down_interruptible(x)
-#define cfs_down_trylock(x)                 down_trylock(x)
 
 /*
  * mutex "implementation" (use Linux kernel's primitives)
@@ -212,17 +153,6 @@ typedef struct semaphore      cfs_semaphore_t;
  * - mutex_is_locked(x)
  * - mutex_destroy(x)
  */
-typedef struct mutex cfs_mutex_t;
-
-#define CFS_DEFINE_MUTEX(name)             DEFINE_MUTEX(name)
-
-#define cfs_mutex_init(x)                   mutex_init(x)
-#define cfs_mutex_lock(x)                   mutex_lock(x)
-#define cfs_mutex_unlock(x)                 mutex_unlock(x)
-#define cfs_mutex_lock_interruptible(x)     mutex_lock_interruptible(x)
-#define cfs_mutex_trylock(x)                mutex_trylock(x)
-#define cfs_mutex_is_locked(x)              mutex_is_locked(x)
-#define cfs_mutex_destroy(x)                mutex_destroy(x)
 
 #ifndef lockdep_set_class
 
@@ -232,62 +162,43 @@ typedef struct mutex cfs_mutex_t;
  *
  **************************************************************************/
 
-typedef struct cfs_lock_class_key {
-        ;
-} cfs_lock_class_key_t;
+struct lock_class_key {
+       ;
+};
 
-#define cfs_lockdep_set_class(lock, key) \
-        do { (void)sizeof (lock);(void)sizeof (key); } while (0)
-/* This has to be a macro, so that `subclass' can be undefined in kernels that
- * do not support lockdep. */
+#define lockdep_set_class(lock, key) \
+       do { (void)sizeof(lock); (void)sizeof(key); } while (0)
+/* This has to be a macro, so that `subclass' can be undefined in kernels
+ * that do not support lockdep. */
 
 
-static inline void cfs_lockdep_off(void)
+static inline void lockdep_off(void)
 {
 }
 
-static inline void cfs_lockdep_on(void)
+static inline void lockdep_on(void)
 {
 }
 #else
-typedef struct lock_class_key cfs_lock_class_key_t;
 
-#define cfs_lockdep_set_class(lock, key) lockdep_set_class(lock, key)
-#define cfs_lockdep_off()                lockdep_off()
-#define cfs_lockdep_on()                 lockdep_on()
 #endif /* lockdep_set_class */
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
 #ifndef mutex_lock_nested
-#define cfs_mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
-#else
-#define cfs_mutex_lock_nested(mutex, subclass) \
-        mutex_lock_nested(mutex, subclass)
+#define mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
 #endif
 
 #ifndef spin_lock_nested
-#define cfs_spin_lock_nested(lock, subclass) spin_lock(lock)
-#else
-#define cfs_spin_lock_nested(lock, subclass) spin_lock_nested(lock, subclass)
+#define spin_lock_nested(lock, subclass) spin_lock(lock)
 #endif
 
 #ifndef down_read_nested
-#define cfs_down_read_nested(lock, subclass) down_read(lock)
-#else
-#define cfs_down_read_nested(lock, subclass) down_read_nested(lock, subclass)
+#define down_read_nested(lock, subclass) down_read(lock)
 #endif
 
 #ifndef down_write_nested
-#define cfs_down_write_nested(lock, subclass) down_write(lock)
-#else
-#define cfs_down_write_nested(lock, subclass) down_write_nested(lock, subclass)
+#define down_write_nested(lock, subclass) down_write(lock)
 #endif
-#else /* CONFIG_DEBUG_LOCK_ALLOC is defined */
-#define cfs_mutex_lock_nested(mutex, subclass) \
-        mutex_lock_nested(mutex, subclass)
-#define cfs_spin_lock_nested(lock, subclass) spin_lock_nested(lock, subclass)
-#define cfs_down_read_nested(lock, subclass) down_read_nested(lock, subclass)
-#define cfs_down_write_nested(lock, subclass) down_write_nested(lock, subclass)
 #endif /* CONFIG_DEBUG_LOCK_ALLOC */
 
 
index b3a493d..bbf260f 100644 (file)
 // XXX BUG 1511 -- remove this stanza and all callers when bug 1511 is resolved
 
 #define SIGNAL_MASK_LOCK(task, flags)                                  \
-  spin_lock_irqsave(&task->sighand->siglock, flags)
+       spin_lock_irqsave(&task->sighand->siglock, flags)
 #define SIGNAL_MASK_UNLOCK(task, flags)                                \
-  spin_unlock_irqrestore(&task->sighand->siglock, flags)
+       spin_unlock_irqrestore(&task->sighand->siglock, flags)
 #define USERMODEHELPER(path, argv, envp)                               \
-  call_usermodehelper(path, argv, envp, 1)
+       call_usermodehelper(path, argv, envp, 1)
 #define RECALC_SIGPENDING         recalc_sigpending()
 #define CLEAR_SIGPENDING          clear_tsk_thread_flag(current,       \
                                                         TIF_SIGPENDING)
index 0f80cdd..64785c2 100644 (file)
@@ -116,15 +116,15 @@ struct upcall_cache_ops {
 };
 
 struct upcall_cache {
-        cfs_list_t              uc_hashtable[UC_CACHE_HASH_SIZE];
-        cfs_spinlock_t          uc_lock;
-        cfs_rwlock_t            uc_upcall_rwlock;
-
-        char                    uc_name[40];            /* for upcall */
-        char                    uc_upcall[UC_CACHE_UPCALL_MAXPATH];
-        int                     uc_acquire_expire;      /* seconds */
-        int                     uc_entry_expire;        /* seconds */
-        struct upcall_cache_ops *uc_ops;
+       cfs_list_t              uc_hashtable[UC_CACHE_HASH_SIZE];
+       spinlock_t              uc_lock;
+       rwlock_t                uc_upcall_rwlock;
+
+       char                    uc_name[40];            /* for upcall */
+       char                    uc_upcall[UC_CACHE_UPCALL_MAXPATH];
+       int                     uc_acquire_expire;      /* seconds */
+       int                     uc_entry_expire;        /* seconds */
+       struct upcall_cache_ops *uc_ops;
 };
 
 struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
index c371c80..927ede9 100644 (file)
@@ -74,7 +74,7 @@ typedef struct poll_table_struct                cfs_poll_table_t;
 #define cfs_seq_open(file, ops, rc)             (rc = seq_open(file, ops))
 
 /* in lprocfs_stat.c, to protect the private data for proc entries */
-extern cfs_rw_semaphore_t       _lprocfs_lock;
+extern struct rw_semaphore             _lprocfs_lock;
 
 /* to begin from 2.6.23, Linux defines self file_operations (proc_reg_file_ops)
  * in procfs, the proc file_operation defined by Lustre (lprocfs_generic_fops)
@@ -86,14 +86,14 @@ extern cfs_rw_semaphore_t       _lprocfs_lock;
  */
 #ifndef HAVE_PROCFS_USERS
 
-#define LPROCFS_ENTRY()                 \
-do {                                    \
-        cfs_down_read(&_lprocfs_lock);  \
+#define LPROCFS_ENTRY()                \
+do {                                   \
+       down_read(&_lprocfs_lock);      \
 } while(0)
 
-#define LPROCFS_EXIT()                  \
-do {                                    \
-        cfs_up_read(&_lprocfs_lock);    \
+#define LPROCFS_EXIT()                 \
+do {                                   \
+       up_read(&_lprocfs_lock);        \
 } while(0)
 
 #else
@@ -121,14 +121,15 @@ int LPROCFS_ENTRY_AND_CHECK(struct proc_dir_entry *dp)
 static inline
 int LPROCFS_ENTRY_AND_CHECK(struct proc_dir_entry *dp)
 {
-        int deleted = 0;
-        spin_lock(&(dp)->pde_unload_lock);
-        if (dp->proc_fops == NULL)
-                deleted = 1;
-        spin_unlock(&(dp)->pde_unload_lock);
-        if (deleted)
-                return -ENODEV;
-        return 0;
+       int deleted = 0;
+
+       spin_lock(&(dp)->pde_unload_lock);
+       if (dp->proc_fops == NULL)
+               deleted = 1;
+       spin_unlock(&(dp)->pde_unload_lock);
+       if (deleted)
+               return -ENODEV;
+       return 0;
 }
 #else /* !HAVE_PROCFS_DELETED*/
 static inline
@@ -148,14 +149,14 @@ do {                                    \
         up_read(&_lprocfs_lock);        \
 } while(0)
 
-#define LPROCFS_WRITE_ENTRY()           \
-do {                                    \
-        cfs_down_write(&_lprocfs_lock); \
+#define LPROCFS_WRITE_ENTRY()          \
+do {                                   \
+       down_write(&_lprocfs_lock);     \
 } while(0)
 
-#define LPROCFS_WRITE_EXIT()            \
-do {                                    \
-        cfs_up_write(&_lprocfs_lock);   \
+#define LPROCFS_WRITE_EXIT()           \
+do {                                   \
+       up_write(&_lprocfs_lock);       \
 } while(0)
 #else /* !LPROCFS */
 
@@ -186,7 +187,7 @@ typedef struct cfs_seq_file {
         size_t                     count;
         loff_t                     index;
         loff_t                     version;
-        cfs_mutex_t                lock;
+       struct mutex                    lock;
         struct cfs_seq_operations *op;
         void                      *private;
 } cfs_seq_file_t;
index 3e667f1..51aba34 100644 (file)
@@ -40,7 +40,7 @@
 #define __LIBCFS_USER_BITOPS_H__
 
 /* test if bit nr is set in bitmap addr; returns previous value of bit nr */
-static __inline__ int cfs_test_and_set_bit(int nr, unsigned long *addr)
+static inline int test_and_set_bit(int nr, unsigned long *addr)
 {
         unsigned long mask;
 
@@ -51,10 +51,10 @@ static __inline__ int cfs_test_and_set_bit(int nr, unsigned long *addr)
         return nr;
 }
 
-#define cfs_set_bit(n, a) cfs_test_and_set_bit(n, a)
+#define set_bit(n, a) test_and_set_bit(n, a)
 
 /* clear bit nr in bitmap addr; returns previous value of bit nr*/
-static __inline__ int cfs_test_and_clear_bit(int nr, unsigned long *addr)
+static inline int test_and_clear_bit(int nr, unsigned long *addr)
 {
         unsigned long mask;
 
@@ -65,9 +65,9 @@ static __inline__ int cfs_test_and_clear_bit(int nr, unsigned long *addr)
         return nr;
 }
 
-#define cfs_clear_bit(n, a) cfs_test_and_clear_bit(n, a)
+#define clear_bit(n, a) test_and_clear_bit(n, a)
 
-static __inline__ int cfs_test_bit(int nr, const unsigned long *addr)
+static inline int test_bit(int nr, const unsigned long *addr)
 {
         return ((1UL << (nr & (BITS_PER_LONG - 1))) &
                 ((addr)[nr / BITS_PER_LONG])) != 0;
@@ -148,14 +148,13 @@ static __inline__ unsigned long __cfs_ffs(long data)
 #define __cfs_ffz(x)   __cfs_ffs(~(x))
 #define __cfs_flz(x)   __cfs_fls(~(x))
 
-unsigned long cfs_find_next_bit(unsigned long *addr,
-                                unsigned long size, unsigned long offset);
+unsigned long find_next_bit(unsigned long *addr,
+                           unsigned long size, unsigned long offset);
 
-unsigned long cfs_find_next_zero_bit(unsigned long *addr,
-                                     unsigned long size, unsigned long offset);
+unsigned long find_next_zero_bit(unsigned long *addr,
+                                unsigned long size, unsigned long offset);
 
-#define cfs_find_first_bit(addr,size)     (cfs_find_next_bit((addr),(size),0))
-#define cfs_find_first_zero_bit(addr,size)  \
-        (cfs_find_next_zero_bit((addr),(size),0))
+#define find_first_bit(addr, size)       find_next_bit((addr), (size),0)
+#define find_first_zero_bit(addr, size)  find_next_zero_bit((addr), (size),0)
 
 #endif
index cd4983b..0605308 100644 (file)
  */
 
 /*
- * cfs_spin_lock
+ * spin_lock
  *
- * - cfs_spin_lock_init(x)
- * - cfs_spin_lock(x)
- * - cfs_spin_unlock(x)
- * - cfs_spin_trylock(x)
- * - cfs_spin_lock_bh_init(x)
- * - cfs_spin_lock_bh(x)
- * - cfs_spin_unlock_bh(x)
+ * - spin_lock_init(x)
+ * - spin_lock(x)
+ * - spin_unlock(x)
+ * - spin_trylock(x)
+ * - spin_lock_bh_init(x)
+ * - spin_lock_bh(x)
+ * - spin_unlock_bh(x)
  *
- * - cfs_spin_is_locked(x)
- * - cfs_spin_lock_irqsave(x, f)
- * - cfs_spin_unlock_irqrestore(x, f)
+ * - spin_is_locked(x)
+ * - spin_lock_irqsave(x, f)
+ * - spin_unlock_irqrestore(x, f)
  *
  * No-op implementation.
  */
-struct cfs_spin_lock {int foo;};
+struct spin_lock { int foo; };
 
-typedef struct cfs_spin_lock cfs_spinlock_t;
+typedef struct spin_lock spinlock_t;
 
-#define DEFINE_SPINLOCK(lock)          cfs_spinlock_t lock = { }
-#define LASSERT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
-#define LINVRNT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
-#define LASSERT_SEM_LOCKED(sem) do {(void)sizeof(sem);} while(0)
-#define LASSERT_MUTEX_LOCKED(x) do {(void)sizeof(x);} while(0)
+#define DEFINE_SPINLOCK(lock)          spinlock_t lock = { }
+#define LASSERT_SPIN_LOCKED(lock)      do { (void)sizeof(lock); } while (0)
+#define LINVRNT_SPIN_LOCKED(lock)      do { (void)sizeof(lock); } while (0)
+#define LASSERT_SEM_LOCKED(sem)                do { (void)sizeof(sem); } while (0)
+#define LASSERT_MUTEX_LOCKED(x)                do { (void)sizeof(x); } while (0)
 
-void cfs_spin_lock_init(cfs_spinlock_t *lock);
-void cfs_spin_lock(cfs_spinlock_t *lock);
-void cfs_spin_unlock(cfs_spinlock_t *lock);
-int cfs_spin_trylock(cfs_spinlock_t *lock);
-void cfs_spin_lock_bh_init(cfs_spinlock_t *lock);
-void cfs_spin_lock_bh(cfs_spinlock_t *lock);
-void cfs_spin_unlock_bh(cfs_spinlock_t *lock);
+void spin_lock_init(spinlock_t *lock);
+void spin_lock(spinlock_t *lock);
+void spin_unlock(spinlock_t *lock);
+int  spin_trylock(spinlock_t *lock);
+void spin_lock_bh_init(spinlock_t *lock);
+void spin_lock_bh(spinlock_t *lock);
+void spin_unlock_bh(spinlock_t *lock);
 
-static inline int cfs_spin_is_locked(cfs_spinlock_t *l) {return 1;}
-static inline void cfs_spin_lock_irqsave(cfs_spinlock_t *l, unsigned long f){}
-static inline void cfs_spin_unlock_irqrestore(cfs_spinlock_t *l,
-                                              unsigned long f){}
+static inline int spin_is_locked(spinlock_t *l) { return 1; }
+static inline void spin_lock_irqsave(spinlock_t *l, unsigned long f) {}
+static inline void spin_unlock_irqrestore(spinlock_t *l, unsigned long f) {}
 
 /*
  * Semaphore
  *
- * - cfs_sema_init(x, v)
+ * - sema_init(x, v)
  * - __down(x)
  * - __up(x)
  */
-typedef struct cfs_semaphore {
-    int foo;
-} cfs_semaphore_t;
+struct semaphore {
+       int foo;
+};
 
-void cfs_sema_init(cfs_semaphore_t *s, int val);
-void __up(cfs_semaphore_t *s);
-void __down(cfs_semaphore_t *s);
-int __down_interruptible(cfs_semaphore_t *s);
+void sema_init(struct semaphore *s, int val);
+void __up(struct semaphore *s);
+void __down(struct semaphore *s);
+int __down_interruptible(struct semaphore *s);
 
-#define CFS_DEFINE_SEMAPHORE(name)      cfs_semaphore_t name = { 1 }
+#define DEFINE_SEMAPHORE(name)      struct semaphore name = { 1 }
 
-#define cfs_up(s)                       __up(s)
-#define cfs_down(s)                     __down(s)
-#define cfs_down_interruptible(s)       __down_interruptible(s)
+#define up(s)                          __up(s)
+#define down(s)                        __down(s)
+#define down_interruptible(s)          __down_interruptible(s)
 
-static inline int cfs_down_trylock(cfs_semaphore_t *sem)
+static inline int down_trylock(struct semaphore *sem)
 {
         return 0;
 }
@@ -135,94 +134,102 @@ static inline int cfs_down_trylock(cfs_semaphore_t *sem)
 /*
  * Completion:
  *
- * - cfs_init_completion_module(c)
- * - cfs_call_wait_handler(t)
- * - cfs_init_completion(c)
- * - cfs_complete(c)
- * - cfs_wait_for_completion(c)
- * - cfs_wait_for_completion_interruptible(c)
+ * - init_completion_module(c)
+ * - call_wait_handler(t)
+ * - init_completion(c)
+ * - complete(c)
+ * - wait_for_completion(c)
+ * - wait_for_completion_interruptible(c)
  */
-typedef struct {
-        unsigned int done;
-        cfs_waitq_t wait;
-} cfs_completion_t;
+struct completion {
+       unsigned int done;
+       cfs_waitq_t wait;
+};
 
-typedef int (*cfs_wait_handler_t) (int timeout);
-void cfs_init_completion_module(cfs_wait_handler_t handler);
-int  cfs_call_wait_handler(int timeout);
-void cfs_init_completion(cfs_completion_t *c);
-void cfs_complete(cfs_completion_t *c);
-void cfs_wait_for_completion(cfs_completion_t *c);
-int cfs_wait_for_completion_interruptible(cfs_completion_t *c);
+typedef int (*wait_handler_t) (int timeout);
+void init_completion_module(wait_handler_t handler);
+int  call_wait_handler(int timeout);
+void init_completion(struct completion *c);
+void complete(struct completion *c);
+void wait_for_completion(struct completion *c);
+int wait_for_completion_interruptible(struct completion *c);
 
-#define CFS_COMPLETION_INITIALIZER(work) \
-        { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+#define COMPLETION_INITIALIZER(work) \
+       { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
 
-#define CFS_DECLARE_COMPLETION(work) \
-        cfs_completion_t work = CFS_COMPLETION_INITIALIZER(work)
 
-#define CFS_INIT_COMPLETION(x)      ((x).done = 0)
+#define INIT_COMPLETION(x)     ((x).done = 0)
 
 
 /*
- * cfs_rw_semaphore:
+ * rw_semaphore:
  *
- * - cfs_init_rwsem(x)
- * - cfs_down_read(x)
- * - cfs_down_read_trylock(x)
- * - cfs_down_write(struct cfs_rw_semaphore *s);
- * - cfs_down_write_trylock(struct cfs_rw_semaphore *s);
- * - cfs_up_read(x)
- * - cfs_up_write(x)
- * - cfs_fini_rwsem(x)
+ * - init_rwsem(x)
+ * - down_read(x)
+ * - down_read_trylock(x)
+ * - down_write(struct rw_semaphore *s);
+ * - down_write_trylock(struct rw_semaphore *s);
+ * - up_read(x)
+ * - up_write(x)
+ * - fini_rwsem(x)
  */
-typedef struct cfs_rw_semaphore {
-        int foo;
-} cfs_rw_semaphore_t;
-
-void cfs_init_rwsem(cfs_rw_semaphore_t *s);
-void cfs_down_read(cfs_rw_semaphore_t *s);
-int cfs_down_read_trylock(cfs_rw_semaphore_t *s);
-void cfs_down_write(cfs_rw_semaphore_t *s);
-int cfs_down_write_trylock(cfs_rw_semaphore_t *s);
-void cfs_up_read(cfs_rw_semaphore_t *s);
-void cfs_up_write(cfs_rw_semaphore_t *s);
-void cfs_fini_rwsem(cfs_rw_semaphore_t *s);
-#define CFS_DECLARE_RWSEM(name)  cfs_rw_semaphore_t name = { }
+struct rw_semaphore {
+       int foo;
+};
+
+void init_rwsem(struct rw_semaphore *s);
+void down_read(struct rw_semaphore *s);
+int down_read_trylock(struct rw_semaphore *s);
+void down_write(struct rw_semaphore *s);
+int down_write_trylock(struct rw_semaphore *s);
+void up_read(struct rw_semaphore *s);
+void up_write(struct rw_semaphore *s);
+void fini_rwsem(struct rw_semaphore *s);
+#define DECLARE_RWSEM(name)  struct rw_semaphore name = { }
 
 /*
  * read-write lock : Need to be investigated more!!
  * XXX nikita: for now, let rwlock_t to be identical to rw_semaphore
  *
- * - cfs_rwlock_init(x)
- * - cfs_read_lock(x)
- * - cfs_read_unlock(x)
- * - cfs_write_lock(x)
- * - cfs_write_unlock(x)
- * - cfs_write_lock_irqsave(x)
- * - cfs_write_unlock_irqrestore(x)
- * - cfs_read_lock_irqsave(x)
- * - cfs_read_unlock_irqrestore(x)
+ * - rwlock_init(x)
+ * - read_lock(x)
+ * - read_unlock(x)
+ * - write_lock(x)
+ * - write_unlock(x)
+ * - write_lock_irqsave(x)
+ * - write_unlock_irqrestore(x)
+ * - read_lock_irqsave(x)
+ * - read_unlock_irqrestore(x)
  */
-typedef cfs_rw_semaphore_t cfs_rwlock_t;
-#define DEFINE_RWLOCK(lock)    cfs_rwlock_t lock = { }
+#define rwlock_t               struct rw_semaphore
+#define DEFINE_RWLOCK(lock)    rwlock_t lock = { }
+
+#define rwlock_init(pl)                init_rwsem(pl)
 
-#define cfs_rwlock_init(pl)         cfs_init_rwsem(pl)
+#define read_lock(l)           down_read(l)
+#define read_unlock(l)         up_read(l)
+#define write_lock(l)          down_write(l)
+#define write_unlock(l)                up_write(l)
+
+static inline void write_lock_irqsave(rwlock_t *l, unsigned long f)
+{
+       write_lock(l);
+}
 
-#define cfs_read_lock(l)            cfs_down_read(l)
-#define cfs_read_unlock(l)          cfs_up_read(l)
-#define cfs_write_lock(l)           cfs_down_write(l)
-#define cfs_write_unlock(l)         cfs_up_write(l)
+static inline void write_unlock_irqrestore(rwlock_t *l, unsigned long f)
+{
+       write_unlock(l);
+}
 
-static inline void
-cfs_write_lock_irqsave(cfs_rwlock_t *l, unsigned long f) { cfs_write_lock(l); }
-static inline void
-cfs_write_unlock_irqrestore(cfs_rwlock_t *l, unsigned long f) { cfs_write_unlock(l); }
+static inline void read_lock_irqsave(rwlock_t *l, unsigned long f)
+{
+       read_lock(l);
+}
 
-static inline void
-cfs_read_lock_irqsave(cfs_rwlock_t *l, unsigned long f) { cfs_read_lock(l); }
-static inline void
-cfs_read_unlock_irqrestore(cfs_rwlock_t *l, unsigned long f) { cfs_read_unlock(l); }
+static inline void read_unlock_irqrestore(rwlock_t *l, unsigned long f)
+{
+       read_unlock(l);
+}
 
 /*
  * Atomic for single-threaded user-space
@@ -260,26 +267,26 @@ typedef struct {
         int c_done;
         pthread_cond_t c_cond;
         pthread_mutex_t c_mut;
-} cfs_mt_completion_t;
+} mt_completion_t;
 
-void cfs_mt_init_completion(cfs_mt_completion_t *c);
-void cfs_mt_fini_completion(cfs_mt_completion_t *c);
-void cfs_mt_complete(cfs_mt_completion_t *c);
-void cfs_mt_wait_for_completion(cfs_mt_completion_t *c);
+void mt_init_completion(mt_completion_t *c);
+void mt_fini_completion(mt_completion_t *c);
+void mt_complete(mt_completion_t *c);
+void mt_wait_for_completion(mt_completion_t *c);
 
 /*
  * Multi-threaded user space atomic APIs
  */
 
-typedef struct { volatile int counter; } cfs_mt_atomic_t;
+typedef struct { volatile int counter; } mt_atomic_t;
 
-int cfs_mt_atomic_read(cfs_mt_atomic_t *a);
-void cfs_mt_atomic_set(cfs_mt_atomic_t *a, int b);
-int cfs_mt_atomic_dec_and_test(cfs_mt_atomic_t *a);
-void cfs_mt_atomic_inc(cfs_mt_atomic_t *a);
-void cfs_mt_atomic_dec(cfs_mt_atomic_t *a);
-void cfs_mt_atomic_add(int b, cfs_mt_atomic_t *a);
-void cfs_mt_atomic_sub(int b, cfs_mt_atomic_t *a);
+int mt_atomic_read(mt_atomic_t *a);
+void mt_atomic_set(mt_atomic_t *a, int b);
+int mt_atomic_dec_and_test(mt_atomic_t *a);
+void mt_atomic_inc(mt_atomic_t *a);
+void mt_atomic_dec(mt_atomic_t *a);
+void mt_atomic_add(int b, mt_atomic_t *a);
+void mt_atomic_sub(int b, mt_atomic_t *a);
 
 #endif /* HAVE_LIBPTHREAD */
 
@@ -288,28 +295,28 @@ void cfs_mt_atomic_sub(int b, cfs_mt_atomic_t *a);
  * Mutex interface.
  *
  **************************************************************************/
-typedef struct cfs_semaphore cfs_mutex_t;
+#define mutex semaphore
 
-#define CFS_DEFINE_MUTEX(m) CFS_DEFINE_SEMAPHORE(m)
+#define DEFINE_MUTEX(m) DEFINE_SEMAPHORE(m)
 
-static inline void cfs_mutex_init(cfs_mutex_t *mutex)
+static inline void mutex_init(struct mutex *mutex)
 {
-        cfs_sema_init(mutex, 1);
+       sema_init(mutex, 1);
 }
 
-static inline void cfs_mutex_lock(cfs_mutex_t *mutex)
+static inline void mutex_lock(struct mutex *mutex)
 {
-        cfs_down(mutex);
+       down(mutex);
 }
 
-static inline void cfs_mutex_unlock(cfs_mutex_t *mutex)
+static inline void mutex_unlock(struct mutex *mutex)
 {
-        cfs_up(mutex);
+       up(mutex);
 }
 
-static inline int cfs_mutex_lock_interruptible(cfs_mutex_t *mutex)
+static inline int mutex_lock_interruptible(struct mutex *mutex)
 {
-        return cfs_down_interruptible(mutex);
+       return down_interruptible(mutex);
 }
 
 /**
@@ -321,12 +328,12 @@ static inline int cfs_mutex_lock_interruptible(cfs_mutex_t *mutex)
  * \retval 1 try-lock succeeded (lock acquired).
  * \retval 0 indicates lock contention.
  */
-static inline int cfs_mutex_trylock(cfs_mutex_t *mutex)
+static inline int mutex_trylock(struct mutex *mutex)
 {
-        return !cfs_down_trylock(mutex);
+       return !down_trylock(mutex);
 }
 
-static inline void cfs_mutex_destroy(cfs_mutex_t *lock)
+static inline void mutex_destroy(struct mutex *lock)
 {
 }
 
@@ -338,7 +345,7 @@ static inline void cfs_mutex_destroy(cfs_mutex_t *lock)
  *
  * \retval 0 mutex is not locked. This should never happen.
  */
-static inline int cfs_mutex_is_locked(cfs_mutex_t *lock)
+static inline int mutex_is_locked(struct mutex *lock)
 {
         return 1;
 }
@@ -350,27 +357,26 @@ static inline int cfs_mutex_is_locked(cfs_mutex_t *lock)
  *
  **************************************************************************/
 
-typedef struct cfs_lock_class_key {
+struct lock_class_key {
         int foo;
-} cfs_lock_class_key_t;
+};
 
-static inline void cfs_lockdep_set_class(void *lock,
-                                         cfs_lock_class_key_t *key)
+static inline void lockdep_set_class(void *lock, struct lock_class_key *key)
 {
 }
 
-static inline void cfs_lockdep_off(void)
+static inline void lockdep_off(void)
 {
 }
 
-static inline void cfs_lockdep_on(void)
+static inline void lockdep_on(void)
 {
 }
 
-#define cfs_mutex_lock_nested(mutex, subclass) cfs_mutex_lock(mutex)
-#define cfs_spin_lock_nested(lock, subclass) cfs_spin_lock(lock)
-#define cfs_down_read_nested(lock, subclass) cfs_down_read(lock)
-#define cfs_down_write_nested(lock, subclass) cfs_down_write(lock)
+#define mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
+#define spin_lock_nested(lock, subclass) spin_lock(lock)
+#define down_read_nested(lock, subclass) down_read(lock)
+#define down_write_nested(lock, subclass) down_write(lock)
 
 
 /* !__KERNEL__ */
index ac8650e..ec660e0 100644 (file)
@@ -75,24 +75,24 @@ char * ul2dstr(ulong_ptr_t address, char *buf, int len);
 
 unsigned long simple_strtoul(const char *cp,char **endp, unsigned int base);
 
-static inline int cfs_set_bit(int nr, void * addr)
+static inline int set_bit(int nr, void * addr)
 {
     (((volatile ULONG *) addr)[nr >> 5]) |= (1UL << (nr & 31));
     return *((int *) addr);
 }
 
-static inline int cfs_test_bit(int nr, void * addr)
+static inline int test_bit(int nr, void * addr)
 {
     return (int)(((1UL << (nr & 31)) & (((volatile ULONG *) addr)[nr >> 5])) != 0);
 }
 
-static inline int cfs_clear_bit(int nr, void * addr)
+static inline int clear_bit(int nr, void * addr)
 {
     (((volatile ULONG *) addr)[nr >> 5]) &= (~(1UL << (nr & 31)));
     return *((int *) addr);
 }
 
-static inline int cfs_test_and_set_bit(int nr, volatile void *addr)
+static inline int test_and_set_bit(int nr, volatile void *addr)
 {
     int rc;
     unsigned char  mask;
@@ -106,11 +106,11 @@ static inline int cfs_test_and_set_bit(int nr, volatile void *addr)
     return rc;
 }
 
-#define ext2_set_bit(nr,addr)   (cfs_set_bit(nr, addr), 0)
-#define ext2_clear_bit(nr,addr)        (cfs_clear_bit(nr, addr), 0)
-#define ext2_test_bit(nr,addr)  cfs_test_bit(nr, addr)
+#define ext2_set_bit(nr, addr)         (set_bit(nr, addr), 0)
+#define ext2_clear_bit(nr, addr)       (clear_bit(nr, addr), 0)
+#define ext2_test_bit(nr, addr)                test_bit(nr, addr)
 
-static inline int cfs_ffs(int x)
+static inline int ffs(int x)
 {
         int r = 1;
 
@@ -178,7 +178,7 @@ static inline unsigned long __cfs_ffs(unsigned long word)
  * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
 static inline
-int cfs_fls(int x)
+int fls(int x)
 {
         int r = 32;
 
@@ -207,7 +207,7 @@ int cfs_fls(int x)
         return r;
 }
 
-static inline unsigned cfs_find_first_bit(const unsigned long *addr,
+static inline unsigned find_first_bit(const unsigned long *addr,
                                           unsigned size)
 {
         unsigned x = 0;
index af09129..c78101b 100644 (file)
@@ -238,7 +238,7 @@ struct inode {
         int             i_uid;
         int             i_gid;
         __u32           i_flags;
-        cfs_mutex_t     i_sem;
+       struct mutex    i_sem;
         void *          i_priv;
 };
 
index 169cc02..7a5e9fe 100644 (file)
@@ -58,7 +58,7 @@
  *  spinlock & event definitions
  */
 
-typedef struct cfs_spin_lock cfs_spinlock_t;
+typedef struct spin_lock spinlock_t;
 
 /* atomic */
 
@@ -86,7 +86,7 @@ int FASTCALL cfs_atomic_sub_return(int i, cfs_atomic_t *v);
 #define cfs_atomic_inc_return(v)  cfs_atomic_add_return(1, v)
 #define cfs_atomic_dec_return(v)  cfs_atomic_sub_return(1, v)
 
-int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, cfs_spinlock_t *lock);
+int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, spinlock_t *lock);
 
 /* event */
 
@@ -213,43 +213,43 @@ cfs_clear_event(event_t * event)
  *
  */
 
-struct cfs_spin_lock {
-    KSPIN_LOCK lock;
-    KIRQL      irql;
+struct spin_lock {
+       KSPIN_LOCK      lock;
+       KIRQL           irql;
 };
 
-#define CFS_DECL_SPIN(name)  cfs_spinlock_t name;
-#define CFS_DECL_SPIN_EXTERN(name)  extern cfs_spinlock_t name;
+#define CFS_DECL_SPIN(name)            spinlock_t name;
+#define CFS_DECL_SPIN_EXTERN(name)     extern spinlock_t name;
 
 #define DEFINE_SPINLOCK {0}
 
-static inline void cfs_spin_lock_init(cfs_spinlock_t *lock)
+static inline void spin_lock_init(spinlock_t *lock)
 {
-    KeInitializeSpinLock(&(lock->lock));
+       KeInitializeSpinLock(&(lock->lock));
 }
 
-static inline void cfs_spin_lock(cfs_spinlock_t *lock)
+static inline void spin_lock(spinlock_t *lock)
 {
-    KeAcquireSpinLock(&(lock->lock), &(lock->irql));
+       KeAcquireSpinLock(&(lock->lock), &(lock->irql));
 }
 
-static inline void cfs_spin_lock_nested(cfs_spinlock_t *lock, unsigned subclass)
+static inline void spin_lock_nested(spinlock_t *lock, unsigned subclass)
 {
-    KeAcquireSpinLock(&(lock->lock), &(lock->irql));
+       KeAcquireSpinLock(&(lock->lock), &(lock->irql));
 }
 
-static inline void cfs_spin_unlock(cfs_spinlock_t *lock)
+static inline void spin_unlock(spinlock_t *lock)
 {
-    KIRQL       irql = lock->irql;
-    KeReleaseSpinLock(&(lock->lock), irql);
+       KIRQL   irql = lock->irql;
+       KeReleaseSpinLock(&(lock->lock), irql);
 }
 
 
-#define cfs_spin_lock_irqsave(lock, flags)  \
-do {(flags) = 0; cfs_spin_lock(lock);} while(0)
+#define spin_lock_irqsave(lock, flags)  \
+       do { (flags) = 0; spin_lock(lock); } while (0)
 
-#define cfs_spin_unlock_irqrestore(lock, flags) \
-do {cfs_spin_unlock(lock);} while(0)
+#define spin_unlock_irqrestore(lock, flags) \
+       do { spin_unlock(lock); } while (0)
 
 
 /* There's no  corresponding routine in windows kernel.
@@ -259,78 +259,78 @@ do {cfs_spin_unlock(lock);} while(0)
 
 extern int libcfs_mp_system;
 
-static int cfs_spin_trylock(cfs_spinlock_t *lock)
+static int spin_trylock(spinlock_t *lock)
 {
-    KIRQL   Irql;
-    int     rc = 0;
+       KIRQL   Irql;
+       int     rc = 0;
 
-    ASSERT(lock != NULL);
+       ASSERT(lock != NULL);
 
-    KeRaiseIrql(DISPATCH_LEVEL, &Irql);
+       KeRaiseIrql(DISPATCH_LEVEL, &Irql);
 
-    if (libcfs_mp_system) {
-        if (0 == (ulong_ptr_t)lock->lock) {
+       if (libcfs_mp_system) {
+               if (0 == (ulong_ptr_t)lock->lock) {
 #if _X86_
-            __asm {
-                mov  edx, dword ptr [ebp + 8]
-                lock bts dword ptr[edx], 0
-                jb   lock_failed
-                mov  rc, TRUE
-            lock_failed:
-            }
+                       __asm {
+                               mov  edx, dword ptr [ebp + 8]
+                               lock bts dword ptr[edx], 0
+                               jb   lock_failed
+                               mov  rc, TRUE
+                               lock_failed:
+                       }
 #else
-        KdBreakPoint();
+                       KdBreakPoint();
 #endif
 
-        }
-    } else {
-        rc = TRUE;
-    }
+               }
+       } else {
+               rc = TRUE;
+       }
 
-    if (rc) {
-        lock->irql = Irql;
-    } else {
-        KeLowerIrql(Irql);
-    }
+       if (rc) {
+               lock->irql = Irql;
+       } else {
+               KeLowerIrql(Irql);
+       }
 
-    return rc;
+       return rc;
 }
 
-static int cfs_spin_is_locked(cfs_spinlock_t *lock)
+static int spin_is_locked(spinlock_t *lock)
 {
 #if _WIN32_WINNT >= 0x502
-    /* KeTestSpinLock only avalilable on 2k3 server or later */
-    return (!KeTestSpinLock(&lock->lock));
+       /* KeTestSpinLock only available on 2k3 server or later */
+       return !KeTestSpinLock(&lock->lock);
 #else
-    return (int) (lock->lock);
+       return (int) (lock->lock);
 #endif
 }
 
 /* synchronization between cpus: it will disable all DPCs
    kernel task scheduler on the CPU */
-#define cfs_spin_lock_bh(x)                cfs_spin_lock(x)
-#define cfs_spin_unlock_bh(x)      cfs_spin_unlock(x)
-#define cfs_spin_lock_bh_init(x)       cfs_spin_lock_init(x)
+#define spin_lock_bh(x)                spin_lock(x)
+#define spin_unlock_bh(x)      spin_unlock(x)
+#define spin_lock_bh_init(x)   spin_lock_init(x)
 
 /*
- * cfs_rw_semaphore (using ERESOURCE)
+ * rw_semaphore (using ERESOURCE)
  */
 
 
-typedef struct cfs_rw_semaphore {
-    ERESOURCE   rwsem;
-} cfs_rw_semaphore_t;
+struct rw_semaphore {
+       ERESOURCE       rwsem;
+};
 
 
-#define CFS_DECLARE_RWSEM(name) cfs_rw_semaphore_t name
-#define CFS_DECLARE_RWSEM_EXTERN(name) extern cfs_rw_semaphore_t name
+#define DECLARE_RWSEM(name) struct rw_semaphore name
+#define CFS_DECLARE_RWSEM_EXTERN(name) extern struct rw_semaphore name
 
 /*
- * cfs_init_rwsem
- *   To initialize the the cfs_rw_semaphore_t structure
+ * init_rwsem
+ *   To initialize the rw_semaphore structure
  *
  * Arguments:
- *   rwsem:  pointer to the cfs_rw_semaphore_t structure
+ *   rwsem:  pointer to the rw_semaphore structure
  *
  * Return Value:
  *   N/A
@@ -339,18 +339,18 @@ typedef struct cfs_rw_semaphore {
  *   N/A
  */
 
-static inline void cfs_init_rwsem(cfs_rw_semaphore_t *s)
+static inline void init_rwsem(struct rw_semaphore *s)
 {
        ExInitializeResourceLite(&s->rwsem);
 }
-#define rwsem_init cfs_init_rwsem
+#define rwsem_init init_rwsem
 
 /*
- * cfs_fini_rwsem
- *   To finilize/destroy the the cfs_rw_semaphore_t structure
+ * fini_rwsem
+ *   To finalize/destroy the rw_semaphore structure
  *
  * Arguments:
- *   rwsem:  pointer to the cfs_rw_semaphore_t structure
+ *   rwsem:  pointer to the rw_semaphore structure
  *
  * Return Value:
  *   N/A
@@ -360,17 +360,17 @@ static inline void cfs_init_rwsem(cfs_rw_semaphore_t *s)
  *   Just define it NULL for other systems.
  */
 
-static inline void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
+static inline void fini_rwsem(struct rw_semaphore *s)
 {
-    ExDeleteResourceLite(&s->rwsem);
+       ExDeleteResourceLite(&s->rwsem);
 }
 
 /*
- * cfs_down_read
- *   To acquire read-lock of the cfs_rw_semaphore
+ * down_read
+ *   To acquire read-lock of the rw_semaphore
  *
  * Arguments:
- *   rwsem:  pointer to the cfs_rw_semaphore_t structure
+ *   rwsem:  pointer to the struct rw_semaphore
  *
  * Return Value:
  *   N/A
@@ -379,19 +379,19 @@ static inline void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
  *   N/A
  */
 
-static inline void cfs_down_read(cfs_rw_semaphore_t *s)
+static inline void down_read(struct rw_semaphore *s)
 {
        ExAcquireResourceSharedLite(&s->rwsem, TRUE);
 }
-#define cfs_down_read_nested cfs_down_read
+#define down_read_nested down_read
 
 
 /*
- * cfs_down_read_trylock
- *   To acquire read-lock of the cfs_rw_semaphore without blocking
+ * down_read_trylock
+ *   To acquire read-lock of the rw_semaphore without blocking
  *
  * Arguments:
- *   rwsem:  pointer to the cfs_rw_semaphore_t structure
+ *   rwsem:  pointer to the struct rw_semaphore
  *
  * Return Value:
  *   Zero: failed to acquire the read lock
@@ -401,18 +401,18 @@ static inline void cfs_down_read(cfs_rw_semaphore_t *s)
  *   This routine will return immediately without waiting.
  */
 
-static inline int cfs_down_read_trylock(cfs_rw_semaphore_t *s)
+static inline int down_read_trylock(struct rw_semaphore *s)
 {
        return ExAcquireResourceSharedLite(&s->rwsem, FALSE);
 }
 
 
 /*
- * cfs_down_write
- *   To acquire write-lock of the cfs_rw_semaphore
+ * down_write
+ *   To acquire write-lock of the struct rw_semaphore
  *
  * Arguments:
- *   rwsem:  pointer to the cfs_rw_semaphore_t structure
+ *   rwsem:  pointer to the struct rw_semaphore
  *
  * Return Value:
  *   N/A
@@ -421,18 +421,18 @@ static inline int cfs_down_read_trylock(cfs_rw_semaphore_t *s)
  *   N/A
  */
 
-static inline void cfs_down_write(cfs_rw_semaphore_t *s)
+static inline void down_write(struct rw_semaphore *s)
 {
        ExAcquireResourceExclusiveLite(&(s->rwsem), TRUE);
 }
-#define cfs_down_write_nested cfs_down_write
+#define down_write_nested down_write
 
 /*
  * down_write_trylock
- *   To acquire write-lock of the cfs_rw_semaphore without blocking
+ *   To acquire write-lock of the rw_semaphore without blocking
  *
  * Arguments:
- *   rwsem:  pointer to the cfs_rw_semaphore_t structure
+ *   rwsem:  pointer to the struct rw_semaphore
  *
  * Return Value:
  *   Zero: failed to acquire the write lock
@@ -442,18 +442,18 @@ static inline void cfs_down_write(cfs_rw_semaphore_t *s)
  *   This routine will return immediately without waiting.
  */
 
-static inline int cfs_down_write_trylock(cfs_rw_semaphore_t *s)
+static inline int down_write_trylock(struct rw_semaphore *s)
 {
-    return ExAcquireResourceExclusiveLite(&(s->rwsem), FALSE);
+       return ExAcquireResourceExclusiveLite(&(s->rwsem), FALSE);
 }
 
 
 /*
- * cfs_up_read
- *   To release read-lock of the cfs_rw_semaphore
+ * up_read
+ *   To release read-lock of the rw_semaphore
  *
  * Arguments:
- *   rwsem:  pointer to the cfs_rw_semaphore_t structure
+ *   rwsem:  pointer to the struct rw_semaphore
  *
  * Return Value:
  *   N/A
@@ -462,20 +462,19 @@ static inline int cfs_down_write_trylock(cfs_rw_semaphore_t *s)
  *   N/A
  */
 
-static inline void cfs_up_read(cfs_rw_semaphore_t *s)
+static inline void up_read(struct rw_semaphore *s)
 {
-    ExReleaseResourceForThreadLite(
-            &(s->rwsem),
-            ExGetCurrentResourceThread());
+       ExReleaseResourceForThreadLite(&(s->rwsem),
+                                      ExGetCurrentResourceThread());
 }
 
 
 /*
- * cfs_up_write
- *   To release write-lock of the cfs_rw_semaphore
+ * up_write
+ *   To release write-lock of the rw_semaphore
  *
  * Arguments:
- *   rwsem:  pointer to the cfs_rw_semaphore_t structure
+ *   rwsem:  pointer to the struct rw_semaphore
  *
  * Return Value:
  *   N/A
@@ -484,11 +483,10 @@ static inline void cfs_up_read(cfs_rw_semaphore_t *s)
  *   N/A
  */
 
-static inline void cfs_up_write(cfs_rw_semaphore_t *s)
+static inline void up_write(struct rw_semaphore *s)
 {
-    ExReleaseResourceForThreadLite(
-                &(s->rwsem),
-                ExGetCurrentResourceThread());
+       ExReleaseResourceForThreadLite(&(s->rwsem),
+                                      ExGetCurrentResourceThread());
 }
 
 /*
@@ -502,37 +500,37 @@ static inline void cfs_up_write(cfs_rw_semaphore_t *s)
  */
 
 typedef struct {
-    cfs_spinlock_t guard;
-    int            count;
-} cfs_rwlock_t;
+       spinlock_t      guard;
+       int             count;
+} rwlock_t;
 
-void cfs_rwlock_init(cfs_rwlock_t * rwlock);
-void cfs_rwlock_fini(cfs_rwlock_t * rwlock);
+void rwlock_init(rwlock_t *rwlock);
+void cfs_rwlock_fini(rwlock_t *rwlock);
 
-void cfs_read_lock(cfs_rwlock_t * rwlock);
-void cfs_read_unlock(cfs_rwlock_t * rwlock);
-void cfs_write_lock(cfs_rwlock_t * rwlock);
-void cfs_write_unlock(cfs_rwlock_t * rwlock);
+void read_lock(rwlock_t *rwlock);
+void read_unlock(rwlock_t *rwlock);
+void write_lock(rwlock_t *rwlock);
+void write_unlock(rwlock_t *rwlock);
 
-#define cfs_write_lock_irqsave(l, f)     do {f = 0; cfs_write_lock(l);} while(0)
-#define cfs_write_unlock_irqrestore(l, f)   do {cfs_write_unlock(l);} while(0)
-#define cfs_read_lock_irqsave(l, f         do {f=0; cfs_read_lock(l);} while(0)
-#define cfs_read_unlock_irqrestore(l, f)    do {cfs_read_unlock(l);} while(0)
+#define write_lock_irqsave(l, f)       do { f = 0; write_lock(l); } while (0)
+#define write_unlock_irqrestore(l, f)  do { write_unlock(l); } while (0)
+#define read_lock_irqsave(l, f)                do { f = 0; read_lock(l); } while (0)
+#define read_unlock_irqrestore(l, f)   do { read_unlock(l); } while (0)
 
-#define cfs_write_lock_bh   cfs_write_lock
-#define cfs_write_unlock_bh cfs_write_unlock
+#define write_lock_bh          write_lock
+#define write_unlock_bh        write_unlock
 
-typedef struct cfs_lock_class_key {
-        int foo;
-} cfs_lock_class_key_t;
+struct lock_class_key {
+       int foo;
+};
 
-#define cfs_lockdep_set_class(lock, class) do {} while(0)
+#define lockdep_set_class(lock, class) do {} while (0)
 
-static inline void cfs_lockdep_off(void)
+static inline void lockdep_off(void)
 {
 }
 
-static inline void cfs_lockdep_on(void)
+static inline void lockdep_on(void)
 {
 }
 
@@ -544,38 +542,35 @@ static inline void cfs_lockdep_on(void)
  * - __up(x)
  */
 
-typedef struct cfs_semaphore {
+struct semaphore {
        KSEMAPHORE sem;
-} cfs_semaphore_t;
+};
 
-static inline void cfs_sema_init(cfs_semaphore_t *s, int val)
+static inline void sema_init(struct semaphore *s, int val)
 {
        KeInitializeSemaphore(&s->sem, val, val);
 }
 
-static inline void __down(cfs_semaphore_t *s)
+static inline void __down(struct semaphore *s)
 {
-   KeWaitForSingleObject( &(s->sem), Executive,
-                          KernelMode, FALSE, NULL );
+       KeWaitForSingleObject(&(s->sem), Executive, KernelMode, FALSE, NULL);
 
 }
-static inline void __up(cfs_semaphore_t *s)
+static inline void __up(struct semaphore *s)
 {
        KeReleaseSemaphore(&s->sem, 0, 1, FALSE);
 }
 
-static inline int down_trylock(cfs_semaphore_t *s)
+static inline int down_trylock(struct semaphore *s)
 {
-    LARGE_INTEGER  timeout = {0};
-    NTSTATUS status =
-        KeWaitForSingleObject( &(s->sem), Executive,
-                               KernelMode, FALSE, &timeout);
+       LARGE_INTEGER  timeout = {0};
+       NTSTATUS status = KeWaitForSingleObject(&(s->sem), Executive,
+                                               KernelMode, FALSE, &timeout);
 
-    if (status == STATUS_SUCCESS) {
-        return 0;
-    }
+       if (status == STATUS_SUCCESS)
+               return 0;
 
-    return 1;
+       return 1;
 }
 
 /*
@@ -587,9 +582,9 @@ static inline int down_trylock(cfs_semaphore_t *s)
  * - mutex_down(x)
  */
 
-typedef struct cfs_semaphore cfs_mutex_t;
+#define mutex semaphore
 
-#define CFS_DECLARE_MUTEX(x) cfs_mutex_t x
+#define CFS_DECLARE_MUTEX(x) struct mutex x
 
 /*
  * init_mutex
@@ -604,10 +599,10 @@ typedef struct cfs_semaphore cfs_mutex_t;
  * Notes:
  *   N/A
  */
-#define cfs_mutex_init cfs_init_mutex
-static inline void cfs_init_mutex(cfs_mutex_t *mutex)
+#define mutex_init cfs_init_mutex
+static inline void cfs_init_mutex(struct mutex *mutex)
 {
-    cfs_sema_init(mutex, 1);
+       sema_init(mutex, 1);
 }
 
 /*
@@ -624,22 +619,22 @@ static inline void cfs_init_mutex(cfs_mutex_t *mutex)
  *   N/A
  */
 
-static inline void cfs_mutex_down(cfs_mutex_t *mutex)
+static inline void cfs_mutex_down(struct mutex *mutex)
 {
-    __down(mutex);
+       __down(mutex);
 }
 
-static inline int cfs_mutex_down_interruptible(cfs_mutex_t *mutex)
+static inline int cfs_mutex_down_interruptible(struct mutex *mutex)
 {
-    __down(mutex);
-    return 0;
+       __down(mutex);
+       return 0;
 }
 
-#define cfs_mutex_lock(m)         cfs_mutex_down(m)
-#define cfs_mutex_trylock(s)      down_trylock(s)
-#define cfs_mutex_lock_nested(m)  cfs_mutex_down(m)
-#define cfs_down(m)               cfs_mutex_down(m)
-#define cfs_down_interruptible(m) cfs_mutex_down_interruptible(m)
+#define mutex_lock(m)          cfs_mutex_down(m)
+#define mutex_trylock(s)       down_trylock(s)
+#define mutex_lock_nested(m)   cfs_mutex_down(m)
+#define down(m)                        cfs_mutex_down(m)
+#define down_interruptible(m)  cfs_mutex_down_interruptible(m)
 
 /*
  * mutex_up
@@ -655,13 +650,13 @@ static inline int cfs_mutex_down_interruptible(cfs_mutex_t *mutex)
  *   N/A
  */
 
-static inline void cfs_mutex_up(cfs_mutex_t *mutex)
+static inline void cfs_mutex_up(struct mutex *mutex)
 {
-    __up(mutex);
+       __up(mutex);
 }
 
-#define cfs_mutex_unlock(m) cfs_mutex_up(m)
-#define cfs_up(m)           cfs_mutex_up(m)
+#define mutex_unlock(m)                cfs_mutex_up(m)
+#define up(m)                  cfs_mutex_up(m)
 
 /*
  * init_mutex_locked
@@ -677,13 +672,13 @@ static inline void cfs_mutex_up(cfs_mutex_t *mutex)
  *   N/A
  */
 
-static inline void cfs_init_mutex_locked(cfs_mutex_t *mutex)
+static inline void cfs_init_mutex_locked(struct mutex *mutex)
 {
-    cfs_init_mutex(mutex);
-    cfs_mutex_down(mutex);
+       cfs_init_mutex(mutex);
+       cfs_mutex_down(mutex);
 }
 
-static inline void cfs_mutex_destroy(cfs_mutex_t *mutex)
+static inline void mutex_destroy(struct mutex *mutex)
 {
 }
 
@@ -695,9 +690,9 @@ static inline void cfs_mutex_destroy(cfs_mutex_t *mutex)
  * - wait_for_completion(c)
  */
 
-typedef struct {
+struct completion{
        event_t  event;
-} cfs_completion_t;
+};
 
 
 /*
@@ -714,7 +709,7 @@ typedef struct {
  *   N/A
  */
 
-static inline void cfs_init_completion(cfs_completion_t *c)
+static inline void init_completion(struct completion *c)
 {
        cfs_init_event(&(c->event), 1, FALSE);
 }
@@ -734,7 +729,7 @@ static inline void cfs_init_completion(cfs_completion_t *c)
  *   N/A
  */
 
-static inline void cfs_complete(cfs_completion_t *c)
+static inline void complete(struct completion *c)
 {
        cfs_wake_event(&(c->event));
 }
@@ -754,17 +749,16 @@ static inline void cfs_complete(cfs_completion_t *c)
  *   N/A
  */
 
-static inline void cfs_wait_for_completion(cfs_completion_t *c)
+static inline void wait_for_completion(struct completion *c)
 {
-    cfs_wait_event_internal(&(c->event), 0);
+       cfs_wait_event_internal(&(c->event), 0);
 }
 
-static inline int cfs_wait_for_completion_interruptible(cfs_completion_t *c)
+static inline int wait_for_completion_interruptible(struct completion *c)
 {
-    cfs_wait_event_internal(&(c->event), 0);
-    return 0;
+       cfs_wait_event_internal(&(c->event), 0);
+       return 0;
 }
 
-#else  /* !__KERNEL__ */
 #endif /* !__KERNEL__ */
 #endif
index 619efc9..16112b2 100644 (file)
@@ -106,42 +106,42 @@ typedef struct cfs_page {
 
 /* Make it prettier to test the above... */
 #define UnlockPage(page)        unlock_page(page)
-#define Page_Uptodate(page)     cfs_test_bit(PG_uptodate, &(page)->flags)
-#define SetPageUptodate(page) \
+#define Page_Uptodate(page)     test_bit(PG_uptodate, &(page)->flags)
+#define SetPageUptodate(page)                                          \
        do {                                                            \
                arch_set_page_uptodate(page);                           \
-               cfs_set_bit(PG_uptodate, &(page)->flags);               \
+               set_bit(PG_uptodate, &(page)->flags);                   \
        } while (0)
-#define ClearPageUptodate(page) cfs_clear_bit(PG_uptodate, &(page)->flags)
-#define PageDirty(page)         cfs_test_bit(PG_dirty, &(page)->flags)
-#define SetPageDirty(page)      cfs_set_bit(PG_dirty, &(page)->flags)
-#define ClearPageDirty(page)    cfs_clear_bit(PG_dirty, &(page)->flags)
-#define PageLocked(page)        cfs_test_bit(PG_locked, &(page)->flags)
-#define LockPage(page)          cfs_set_bit(PG_locked, &(page)->flags)
-#define TryLockPage(page)       cfs_test_and_set_bit(PG_locked, &(page)->flags)
-#define PageChecked(page)       cfs_test_bit(PG_checked, &(page)->flags)
-#define SetPageChecked(page)    cfs_set_bit(PG_checked, &(page)->flags)
-#define ClearPageChecked(page)  cfs_clear_bit(PG_checked, &(page)->flags)
-#define PageLaunder(page)       cfs_test_bit(PG_launder, &(page)->flags)
-#define SetPageLaunder(page)    cfs_set_bit(PG_launder, &(page)->flags)
-#define ClearPageLaunder(page)  cfs_clear_bit(PG_launder, &(page)->flags)
-#define ClearPageArch1(page)    cfs_clear_bit(PG_arch_1, &(page)->flags)
-
-#define PageError(page)                cfs_test_bit(PG_error, &(page)->flags)
-#define SetPageError(page)     cfs_set_bit(PG_error, &(page)->flags)
-#define ClearPageError(page)   cfs_clear_bit(PG_error, &(page)->flags)
-#define PageReferenced(page)    cfs_test_bit(PG_referenced, &(page)->flags)
-#define SetPageReferenced(page) cfs_set_bit(PG_referenced, &(page)->flags)
-#define ClearPageReferenced(page) cfs_clear_bit(PG_referenced, &(page)->flags)
-
-#define PageActive(page)        cfs_test_bit(PG_active, &(page)->flags)
-#define SetPageActive(page)     cfs_set_bit(PG_active, &(page)->flags)
-#define ClearPageActive(page)   cfs_clear_bit(PG_active, &(page)->flags)
-
-#define PageWriteback(page)    cfs_test_bit(PG_writeback, &(page)->flags)
-#define TestSetPageWriteback(page) cfs_test_and_set_bit(PG_writeback,  \
+#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
+#define PageDirty(page)        test_bit(PG_dirty, &(page)->flags)
+#define SetPageDirty(page)     set_bit(PG_dirty, &(page)->flags)
+#define ClearPageDirty(page)   clear_bit(PG_dirty, &(page)->flags)
+#define PageLocked(page)       test_bit(PG_locked, &(page)->flags)
+#define LockPage(page)         set_bit(PG_locked, &(page)->flags)
+#define TryLockPage(page)      test_and_set_bit(PG_locked, &(page)->flags)
+#define PageChecked(page)      test_bit(PG_checked, &(page)->flags)
+#define SetPageChecked(page)   set_bit(PG_checked, &(page)->flags)
+#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)
+#define PageLaunder(page)      test_bit(PG_launder, &(page)->flags)
+#define SetPageLaunder(page)   set_bit(PG_launder, &(page)->flags)
+#define ClearPageLaunder(page) clear_bit(PG_launder, &(page)->flags)
+#define ClearPageArch1(page)   clear_bit(PG_arch_1, &(page)->flags)
+
+#define PageError(page)        test_bit(PG_error, &(page)->flags)
+#define SetPageError(page)     set_bit(PG_error, &(page)->flags)
+#define ClearPageError(page)   clear_bit(PG_error, &(page)->flags)
+#define PageReferenced(page)   test_bit(PG_referenced, &(page)->flags)
+#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
+#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)
+
+#define PageActive(page)        test_bit(PG_active, &(page)->flags)
+#define SetPageActive(page)     set_bit(PG_active, &(page)->flags)
+#define ClearPageActive(page)   clear_bit(PG_active, &(page)->flags)
+
+#define PageWriteback(page)    test_bit(PG_writeback, &(page)->flags)
+#define TestSetPageWriteback(page) test_and_set_bit(PG_writeback,      \
                                                        &(page)->flags)
-#define TestClearPageWriteback(page) cfs_test_and_clear_bit(PG_writeback, \
+#define TestClearPageWriteback(page) test_and_clear_bit(PG_writeback,  \
                                                        &(page)->flags)
 
 #define __GFP_FS    (1)
index 4cf73c6..5ac1fc0 100644 (file)
@@ -319,7 +319,7 @@ struct seq_file {
        size_t count;
        loff_t index;
        u32    version;
-       cfs_mutex_t lock;
+       struct mutex            lock;
        const struct seq_operations *op;
        void *private;
 };
@@ -409,12 +409,11 @@ typedef int cfs_task_state_t;
 #define CFS_WAITLINK_MAGIC  'CWLM'
 
 typedef struct cfs_waitq {
+       unsigned int            magic;
+       unsigned int            flags;
 
-    unsigned int            magic;
-    unsigned int            flags;
-
-    cfs_spinlock_t          guard;
-    cfs_list_t              waiters;
+       spinlock_t              guard;
+       cfs_list_t              waiters;
 
 } cfs_waitq_t;
 
@@ -613,17 +612,15 @@ static inline void task_unlock(cfs_task_t *t)
 #define TASKSLT_MAGIC  'TSLT'   /* Task Slot */
 
 typedef struct _TASK_MAN {
+       ULONG           Magic;          /* Magic and Flags */
+       ULONG           Flags;
 
-    ULONG           Magic;      /* Magic and Flags */
-    ULONG           Flags;
-
-    cfs_spinlock_t  Lock;       /* Protection lock */
-
-    cfs_mem_cache_t *slab; /* Memory slab for task slot */
+       spinlock_t      Lock;           /* Protection lock */
 
-    ULONG           NumOfTasks; /* Total tasks (threads) */
-    LIST_ENTRY      TaskList;   /* List of task slots */
+       cfs_mem_cache_t *slab;          /* Memory slab for task slot */
 
+       ULONG           NumOfTasks;     /* Total tasks (threads) */
+       LIST_ENTRY      TaskList;       /* List of task slots */
 } TASK_MAN, *PTASK_MAN;
 
 typedef struct _TASK_SLOT {
index 7f9e45e..d014d7f 100644 (file)
@@ -232,11 +232,11 @@ typedef struct _KS_TSDU_MDL {
 } KS_TSDU_MDL, *PKS_TSDU_MDL;
 
 typedef struct ks_engine_mgr {
-    cfs_spinlock_t          lock;
-    int                     stop;
-    event_t                 exit;
-    event_t                 start;
-    cfs_list_t              list;
+       spinlock_t      lock;
+       int             stop;
+       event_t         exit;
+       event_t         start;
+       cfs_list_t      list;
 } ks_engine_mgr_t;
 
 typedef struct ks_engine_slot {
@@ -248,19 +248,19 @@ typedef struct ks_engine_slot {
 } ks_engine_slot_t;
 
 typedef struct _KS_TSDUMGR {
-    cfs_list_t              TsduList;
-    ULONG                   NumOfTsdu;
-    ULONG                   TotalBytes;
-    KEVENT                  Event;
-    cfs_spinlock_t          Lock;
-    ks_engine_slot_t        Slot;
-    ULONG                   Payload;
-    int                     Busy:1;
-    int                     OOB:1;
+       cfs_list_t              TsduList;
+       ULONG                   NumOfTsdu;
+       ULONG                   TotalBytes;
+       KEVENT                  Event;
+       spinlock_t              Lock;
+       ks_engine_slot_t        Slot;
+       ULONG                   Payload;
+       int                     Busy:1;
+       int                     OOB:1;
 } KS_TSDUMGR, *PKS_TSDUMGR;
 
-#define ks_lock_tsdumgr(mgr)   cfs_spin_lock(&((mgr)->Lock))
-#define ks_unlock_tsdumgr(mgr) cfs_spin_unlock(&((mgr)->Lock))
+#define ks_lock_tsdumgr(mgr)   spin_lock(&((mgr)->Lock))
+#define ks_unlock_tsdumgr(mgr) spin_unlock(&((mgr)->Lock))
 
 typedef struct _KS_CHAIN {
     KS_TSDUMGR          Normal;      /* normal queue */
@@ -423,7 +423,7 @@ struct socket {
         ulong                       kstc_magic;      /* Magic & Flags */
         ulong                       kstc_flags;
 
-        cfs_spinlock_t              kstc_lock;       /* serialise lock*/
+       spinlock_t                  kstc_lock;       /* serialise lock*/
         void *                      kstc_conn;       /* ks_conn_t */
 
         ks_tconn_type_t             kstc_type;          /* tdi connection Type */
@@ -614,15 +614,14 @@ typedef struct ks_addr_slot {
 } ks_addr_slot_t;
 
 typedef struct {
+       /*
+        * Tdi client information
+        */
 
-    /*
-     * Tdi client information
-     */
+       UNICODE_STRING  ksnd_client_name;       /* tdi client module name */
+       HANDLE          ksnd_pnp_handle;        /* the handle for pnp changes */
 
-    UNICODE_STRING        ksnd_client_name; /* tdi client module name */
-    HANDLE                ksnd_pnp_handle;  /* the handle for pnp changes */
-
-    cfs_spinlock_t        ksnd_addrs_lock;  /* serialize ip address list access */
+       spinlock_t      ksnd_addrs_lock;        /* serialize ip address list */
     LIST_ENTRY            ksnd_addrs_list;  /* list of the ip addresses */
     int                   ksnd_naddrs;      /* number of the ip addresses */
 
@@ -634,15 +633,15 @@ typedef struct {
 
     TDI_PROVIDER_INFO     ksnd_provider;        /* tdi tcp/ip provider's information */
 
-    cfs_spinlock_t        ksnd_tconn_lock;      /* tdi connections access serialise */
+       spinlock_t      ksnd_tconn_lock;        /* tdi connections access lock*/
+
+       int             ksnd_ntconns;           /* number of tconns in list */
+       cfs_list_t      ksnd_tconns;            /* tdi connections list */
+       cfs_mem_cache_t *ksnd_tconn_slab;       /* ks_tconn_t allocation slabs*/
+       event_t         ksnd_tconn_exit;        /* event signal by last tconn */
 
-    int                   ksnd_ntconns;         /* number of tconns attached in list */
-    cfs_list_t            ksnd_tconns;          /* tdi connections list */
-    cfs_mem_cache_t *     ksnd_tconn_slab;      /* slabs for ks_tconn_t allocations */
-    event_t               ksnd_tconn_exit;      /* exit event to be signaled by the last tconn */
+       spinlock_t      ksnd_tsdu_lock;         /* tsdu access serialise */
 
-    cfs_spinlock_t        ksnd_tsdu_lock;       /* tsdu access serialise */
-        
     int                   ksnd_ntsdus;          /* number of tsdu buffers allocated */
     ulong                 ksnd_tsdu_size;       /* the size of a signel tsdu buffer */
     cfs_mem_cache_t       *ksnd_tsdu_slab;       /* slab cache for tsdu buffer allocation */
index 06c028d..c3f0b56 100644 (file)
@@ -57,23 +57,23 @@ struct cfs_zone_nob {
 };
 
 static struct cfs_zone_nob      cfs_zone_nob;
-static spinlock_t               cfs_zone_guard;
+static spinlock_t              cfs_zone_guard;
 
 cfs_mem_cache_t *mem_cache_find(const char *name, size_t objsize)
 {
-        cfs_mem_cache_t         *walker = NULL;
+       cfs_mem_cache_t         *walker = NULL;
 
-        LASSERT(cfs_zone_nob.z_nob != NULL);
+       LASSERT(cfs_zone_nob.z_nob != NULL);
 
-        spin_lock(&cfs_zone_guard);
-        list_for_each_entry(walker, cfs_zone_nob.z_nob, mc_link) {
-                if (!strcmp(walker->mc_name, name) && \
-                    walker->mc_size == objsize)
-                        break;
-        }
-        spin_unlock(&cfs_zone_guard);
+       spin_lock(&cfs_zone_guard);
+       list_for_each_entry(walker, cfs_zone_nob.z_nob, mc_link) {
+               if (!strcmp(walker->mc_name, name) && \
+                   walker->mc_size == objsize)
+                       break;
+       }
+       spin_unlock(&cfs_zone_guard);
 
-        return walker;
+       return walker;
 }
 
 /*
@@ -270,18 +270,18 @@ static void raw_page_finish(struct xnu_raw_page *pg)
 
 void raw_page_death_row_clean(void)
 {
-        struct xnu_raw_page *pg;
+       struct xnu_raw_page *pg;
 
-        spin_lock(&page_death_row_phylax);
-        while (!list_empty(&page_death_row)) {
-                pg = container_of(page_death_row.next,
-                                  struct xnu_raw_page, link);
-                list_del(&pg->link);
-                spin_unlock(&page_death_row_phylax);
-                raw_page_finish(pg);
-                spin_lock(&page_death_row_phylax);
-        }
-        spin_unlock(&page_death_row_phylax);
+       spin_lock(&page_death_row_phylax);
+       while (!list_empty(&page_death_row)) {
+               pg = container_of(page_death_row.next,
+                                 struct xnu_raw_page, link);
+               list_del(&pg->link);
+               spin_unlock(&page_death_row_phylax);
+               raw_page_finish(pg);
+               spin_lock(&page_death_row_phylax);
+       }
+       spin_unlock(&page_death_row_phylax);
 }
 
 /* Free a "page" */
@@ -289,20 +289,20 @@ void free_raw_page(struct xnu_raw_page *pg)
 {
        if (!atomic_dec_and_test(&pg->count))
                return;
-        /*
-         * kmem_free()->vm_map_remove()->vm_map_delete()->lock_write() may
-         * block. (raw_page_done()->upl_abort() can block too) On the other
-         * hand, cfs_free_page() may be called in non-blockable context. To
-         * work around this, park pages on global list when cannot block.
-         */
-        if (get_preemption_level() > 0) {
-                spin_lock(&page_death_row_phylax);
-                list_add(&pg->link, &page_death_row);
-                spin_unlock(&page_death_row_phylax);
-        } else {
-                raw_page_finish(pg);
-                raw_page_death_row_clean();
-        }
+       /*
+        * kmem_free()->vm_map_remove()->vm_map_delete()->lock_write() may
+        * block. (raw_page_done()->upl_abort() can block too) On the other
+        * hand, cfs_free_page() may be called in non-blockable context. To
+        * work around this, park pages on global list when cannot block.
+        */
+       if (get_preemption_level() > 0) {
+               spin_lock(&page_death_row_phylax);
+               list_add(&pg->link, &page_death_row);
+               spin_unlock(&page_death_row_phylax);
+       } else {
+               raw_page_finish(pg);
+               raw_page_death_row_clean();
+       }
 }
 
 cfs_page_t *cfs_alloc_page(u_int32_t flags)
@@ -471,22 +471,22 @@ int cfs_mem_init(void)
 
                 cfs_zone_nob.z_nob = nob->z_nob;
         }
-        spin_lock_init(&cfs_zone_guard);
+       spin_lock_init(&cfs_zone_guard);
 #endif
-        CFS_INIT_LIST_HEAD(&page_death_row);
-        spin_lock_init(&page_death_row_phylax);
-        raw_page_cache = cfs_mem_cache_create("raw-page", CFS_PAGE_SIZE, 0, 0);
-        return 0;
+       CFS_INIT_LIST_HEAD(&page_death_row);
+       spin_lock_init(&page_death_row_phylax);
+       raw_page_cache = cfs_mem_cache_create("raw-page", CFS_PAGE_SIZE, 0, 0);
+       return 0;
 }
 
 void cfs_mem_fini(void)
 {
-        raw_page_death_row_clean();
-        spin_lock_done(&page_death_row_phylax);
-        cfs_mem_cache_destroy(raw_page_cache);
+       raw_page_death_row_clean();
+       spin_lock_done(&page_death_row_phylax);
+       cfs_mem_cache_destroy(raw_page_cache);
 
-#if     CFS_INDIVIDUAL_ZONE
-        cfs_zone_nob.z_nob = NULL;
-        spin_lock_done(&cfs_zone_guard);
+#if CFS_INDIVIDUAL_ZONE
+       cfs_zone_nob.z_nob = NULL;
+       spin_lock_done(&cfs_zone_guard);
 #endif
 }
index 494475d..473edf7 100644 (file)
@@ -85,7 +85,7 @@ int __cfs_fail_check_set(__u32 id, __u32 value, int set)
                 int count = cfs_atomic_inc_return(&cfs_fail_count);
 
                 if (count >= cfs_fail_val) {
-                        cfs_set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
+                       set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
                         cfs_atomic_set(&cfs_fail_count, 0);
                         /* we lost the race to increase */
                         if (count > cfs_fail_val)
@@ -95,9 +95,9 @@ int __cfs_fail_check_set(__u32 id, __u32 value, int set)
 
         if ((set == CFS_FAIL_LOC_ORSET || set == CFS_FAIL_LOC_RESET) &&
             (value & CFS_FAIL_ONCE))
-                cfs_set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
+               set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
         /* Lost race to set CFS_FAILED_BIT. */
-        if (cfs_test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) {
+       if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) {
                 /* If CFS_FAIL_ONCE is valid, only one process can fail,
                  * otherwise multi-process can fail at the same time. */
                 if (cfs_fail_loc & CFS_FAIL_ONCE)
index cb260a4..37308a7 100644 (file)
@@ -126,31 +126,31 @@ cfs_hash_nl_unlock(cfs_hash_lock_t *lock, int exclusive) {}
 static inline void
 cfs_hash_spin_lock(cfs_hash_lock_t *lock, int exclusive)
 {
-        cfs_spin_lock(&lock->spin);
+       spin_lock(&lock->spin);
 }
 
 static inline void
 cfs_hash_spin_unlock(cfs_hash_lock_t *lock, int exclusive)
 {
-        cfs_spin_unlock(&lock->spin);
+       spin_unlock(&lock->spin);
 }
 
 static inline void
 cfs_hash_rw_lock(cfs_hash_lock_t *lock, int exclusive)
 {
-        if (!exclusive)
-                cfs_read_lock(&lock->rw);
-        else
-                cfs_write_lock(&lock->rw);
+       if (!exclusive)
+               read_lock(&lock->rw);
+       else
+               write_lock(&lock->rw);
 }
 
 static inline void
 cfs_hash_rw_unlock(cfs_hash_lock_t *lock, int exclusive)
 {
-        if (!exclusive)
-                cfs_read_unlock(&lock->rw);
-        else
-                cfs_write_unlock(&lock->rw);
+       if (!exclusive)
+               read_unlock(&lock->rw);
+       else
+               write_unlock(&lock->rw);
 }
 
 /** No lock hash */
@@ -210,15 +210,15 @@ static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops =
 static void
 cfs_hash_lock_setup(cfs_hash_t *hs)
 {
-        if (cfs_hash_with_no_lock(hs)) {
-                hs->hs_lops = &cfs_hash_nl_lops;
+       if (cfs_hash_with_no_lock(hs)) {
+               hs->hs_lops = &cfs_hash_nl_lops;
 
-        } else if (cfs_hash_with_no_bktlock(hs)) {
-                hs->hs_lops = &cfs_hash_nbl_lops;
-                cfs_spin_lock_init(&hs->hs_lock.spin);
+       } else if (cfs_hash_with_no_bktlock(hs)) {
+               hs->hs_lops = &cfs_hash_nbl_lops;
+               spin_lock_init(&hs->hs_lock.spin);
 
-        } else if (cfs_hash_with_rehash(hs)) {
-                cfs_rwlock_init(&hs->hs_lock.rw);
+       } else if (cfs_hash_with_rehash(hs)) {
+               rwlock_init(&hs->hs_lock.rw);
 
                 if (cfs_hash_with_rw_bktlock(hs))
                         hs->hs_lops = &cfs_hash_bkt_rw_lops;
@@ -506,12 +506,12 @@ cfs_hash_bd_dep_record(cfs_hash_t *hs, cfs_hash_bd_t *bd, int dep_cur)
                    max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
                 return;
 
-        cfs_spin_lock(&hs->hs_dep_lock);
-        hs->hs_dep_max  = dep_cur;
-        hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
-        hs->hs_dep_off  = bd->bd_offset;
-        hs->hs_dep_bits = hs->hs_cur_bits;
-        cfs_spin_unlock(&hs->hs_dep_lock);
+       spin_lock(&hs->hs_dep_lock);
+       hs->hs_dep_max  = dep_cur;
+       hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
+       hs->hs_dep_off  = bd->bd_offset;
+       hs->hs_dep_bits = hs->hs_cur_bits;
+       spin_unlock(&hs->hs_dep_lock);
 
        cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
 # endif
@@ -936,14 +936,14 @@ cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts,
                     cfs_hash_with_no_bktlock(hs))
                         continue;
 
-                if (cfs_hash_with_rw_bktlock(hs))
-                        cfs_rwlock_init(&new_bkts[i]->hsb_lock.rw);
-                else if (cfs_hash_with_spin_bktlock(hs))
-                        cfs_spin_lock_init(&new_bkts[i]->hsb_lock.spin);
-                else
-                        LBUG(); /* invalid use-case */
-        }
-        return new_bkts;
+               if (cfs_hash_with_rw_bktlock(hs))
+                       rwlock_init(&new_bkts[i]->hsb_lock.rw);
+               else if (cfs_hash_with_spin_bktlock(hs))
+                       spin_lock_init(&new_bkts[i]->hsb_lock.spin);
+               else
+                       LBUG(); /* invalid use-case */
+       }
+       return new_bkts;
 }
 
 /**
@@ -960,45 +960,45 @@ static int cfs_hash_rehash_worker(cfs_workitem_t *wi);
 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
 static int cfs_hash_dep_print(cfs_workitem_t *wi)
 {
-        cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
-        int         dep;
-        int         bkt;
-        int         off;
-        int         bits;
-
-        cfs_spin_lock(&hs->hs_dep_lock);
-        dep  = hs->hs_dep_max;
-        bkt  = hs->hs_dep_bkt;
-        off  = hs->hs_dep_off;
-        bits = hs->hs_dep_bits;
-        cfs_spin_unlock(&hs->hs_dep_lock);
-
-        LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
-                      hs->hs_name, bits, dep, bkt, off);
-        cfs_spin_lock(&hs->hs_dep_lock);
-        hs->hs_dep_bits = 0; /* mark as workitem done */
-        cfs_spin_unlock(&hs->hs_dep_lock);
-        return 0;
+       cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
+       int         dep;
+       int         bkt;
+       int         off;
+       int         bits;
+
+       spin_lock(&hs->hs_dep_lock);
+       dep  = hs->hs_dep_max;
+       bkt  = hs->hs_dep_bkt;
+       off  = hs->hs_dep_off;
+       bits = hs->hs_dep_bits;
+       spin_unlock(&hs->hs_dep_lock);
+
+       LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
+                     hs->hs_name, bits, dep, bkt, off);
+       spin_lock(&hs->hs_dep_lock);
+       hs->hs_dep_bits = 0; /* mark as workitem done */
+       spin_unlock(&hs->hs_dep_lock);
+       return 0;
 }
 
 static void cfs_hash_depth_wi_init(cfs_hash_t *hs)
 {
-       cfs_spin_lock_init(&hs->hs_dep_lock);
+       spin_lock_init(&hs->hs_dep_lock);
        cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
 }
 
 static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
 {
        if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
-                return;
+               return;
 
-        cfs_spin_lock(&hs->hs_dep_lock);
-        while (hs->hs_dep_bits != 0) {
-                cfs_spin_unlock(&hs->hs_dep_lock);
-                cfs_cond_resched();
-                cfs_spin_lock(&hs->hs_dep_lock);
-        }
-        cfs_spin_unlock(&hs->hs_dep_lock);
+       spin_lock(&hs->hs_dep_lock);
+       while (hs->hs_dep_bits != 0) {
+               spin_unlock(&hs->hs_dep_lock);
+               cfs_cond_resched();
+               spin_lock(&hs->hs_dep_lock);
+       }
+       spin_unlock(&hs->hs_dep_lock);
 }
 
 #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
@@ -2107,7 +2107,7 @@ int cfs_hash_debug_str(cfs_hash_t *hs, char *str, int size)
                 if (maxdep < bd.bd_bucket->hsb_depmax) {
                         maxdep  = bd.bd_bucket->hsb_depmax;
 #ifdef __KERNEL__
-                        maxdepb = cfs_ffz(~maxdep);
+                       maxdepb = ffz(~maxdep);
 #endif
                 }
                 total += bd.bd_bucket->hsb_count;
index 1e572f4..d834630 100644 (file)
@@ -192,7 +192,7 @@ struct kkuc_reg {
 };
 static cfs_list_t kkuc_groups[KUC_GRP_MAX+1] = {};
 /* Protect message sending against remove and adds */
-static CFS_DECLARE_RWSEM(kg_sem);
+static DECLARE_RWSEM(kg_sem);
 
 /** Add a receiver to a broadcast group
  * @param filp pipe to write into
@@ -221,11 +221,11 @@ int libcfs_kkuc_group_add(cfs_file_t *filp, int uid, int group, __u32 data)
         reg->kr_uid = uid;
         reg->kr_data = data;
 
-        cfs_down_write(&kg_sem);
+       down_write(&kg_sem);
         if (kkuc_groups[group].next == NULL)
                 CFS_INIT_LIST_HEAD(&kkuc_groups[group]);
         cfs_list_add(&reg->kr_chain, &kkuc_groups[group]);
-        cfs_up_write(&kg_sem);
+       up_write(&kg_sem);
 
         CDEBUG(D_KUC, "Added uid=%d fp=%p to group %d\n", uid, filp, group);
 
@@ -252,7 +252,7 @@ int libcfs_kkuc_group_rem(int uid, int group)
                 libcfs_kkuc_group_put(group, &lh);
         }
 
-        cfs_down_write(&kg_sem);
+       down_write(&kg_sem);
         cfs_list_for_each_entry_safe(reg, next, &kkuc_groups[group], kr_chain) {
                 if ((uid == 0) || (uid == reg->kr_uid)) {
                         cfs_list_del(&reg->kr_chain);
@@ -263,7 +263,7 @@ int libcfs_kkuc_group_rem(int uid, int group)
                         cfs_free(reg);
                 }
         }
-        cfs_up_write(&kg_sem);
+       up_write(&kg_sem);
 
         RETURN(0);
 }
@@ -275,7 +275,7 @@ int libcfs_kkuc_group_put(int group, void *payload)
         int rc = 0;
         ENTRY;
 
-        cfs_down_read(&kg_sem);
+       down_read(&kg_sem);
         cfs_list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
                 if (reg->kr_fp != NULL) {
                 rc = libcfs_kkuc_msg_put(reg->kr_fp, payload);
@@ -285,7 +285,7 @@ int libcfs_kkuc_group_put(int group, void *payload)
                         }
                 }
         }
-        cfs_up_read(&kg_sem);
+       up_read(&kg_sem);
 
         RETURN(rc);
 }
@@ -313,13 +313,13 @@ int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
         if (kkuc_groups[group].next == NULL)
                 RETURN(0);
 
-        cfs_down_read(&kg_sem);
+       down_read(&kg_sem);
         cfs_list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
                 if (reg->kr_fp != NULL) {
                         rc = cb_func(reg->kr_data, cb_arg);
                 }
         }
-        cfs_up_read(&kg_sem);
+       up_read(&kg_sem);
 
         RETURN(rc);
 }
index a587e95..adf157f 100644 (file)
@@ -62,7 +62,7 @@ struct cfs_percpt_lock *
 cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
 {
        struct cfs_percpt_lock  *pcl;
-       cfs_spinlock_t          *lock;
+       spinlock_t              *lock;
        int                     i;
 
        /* NB: cptab can be NULL, pcl will be for HW CPUs on that case */
@@ -78,7 +78,7 @@ cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
        }
 
        cfs_percpt_for_each(lock, i, pcl->pcl_locks)
-               cfs_spin_lock_init(lock);
+               spin_lock_init(lock);
 
        return pcl;
 }
@@ -109,13 +109,13 @@ cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
        }
 
        if (likely(index != CFS_PERCPT_LOCK_EX)) {
-               cfs_spin_lock(pcl->pcl_locks[index]);
+               spin_lock(pcl->pcl_locks[index]);
                return;
        }
 
        /* exclusive lock request */
        for (i = 0; i < ncpt; i++) {
-               cfs_spin_lock(pcl->pcl_locks[i]);
+               spin_lock(pcl->pcl_locks[i]);
                if (i == 0) {
                        LASSERT(!pcl->pcl_locked);
                        /* nobody should take private lock after this
@@ -136,7 +136,7 @@ cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
        index = ncpt == 1 ? 0 : index;
 
        if (likely(index != CFS_PERCPT_LOCK_EX)) {
-               cfs_spin_unlock(pcl->pcl_locks[index]);
+               spin_unlock(pcl->pcl_locks[index]);
                return;
        }
 
@@ -145,7 +145,7 @@ cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
                        LASSERT(pcl->pcl_locked);
                        pcl->pcl_locked = 0;
                }
-               cfs_spin_unlock(pcl->pcl_locks[i]);
+               spin_unlock(pcl->pcl_locks[i]);
        }
 }
 CFS_EXPORT_SYMBOL(cfs_percpt_unlock);
index 3c5956e..fe0b63a 100644 (file)
 
 int oom_get_adj(struct task_struct *task, int scope)
 {
-
-        int oom_adj;
+       int oom_adj;
 #ifdef HAVE_OOMADJ_IN_SIG
-        unsigned long flags;
+       unsigned long flags;
 
-        spin_lock_irqsave(&task->sighand->siglock, flags);
-        oom_adj = task->signal->oom_adj;
-        task->signal->oom_adj = scope;
-        spin_unlock_irqrestore(&task->sighand->siglock, flags);
+       spin_lock_irqsave(&task->sighand->siglock, flags);
+       oom_adj = task->signal->oom_adj;
+       task->signal->oom_adj = scope;
+       spin_unlock_irqrestore(&task->sighand->siglock, flags);
 
 #else
-        oom_adj = task->oomkilladj;
-        task->oomkilladj = scope;
+       oom_adj = task->oomkilladj;
+       task->oomkilladj = scope;
 #endif
-        return oom_adj;
+       return oom_adj;
 }
 
 int cfs_create_thread(int (*fn)(void *),
index 1df8542..aa9c377 100644 (file)
@@ -105,11 +105,11 @@ EXPORT_SYMBOL(cfs_waitq_add_exclusive);
 void
 cfs_waitq_add_exclusive_head(cfs_waitq_t *waitq, cfs_waitlink_t *link)
 {
-        unsigned long flags;
+       unsigned long flags;
 
-        spin_lock_irqsave(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
-        __add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-        spin_unlock_irqrestore(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
+       spin_lock_irqsave(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
+       __add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
+       spin_unlock_irqrestore(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
 }
 EXPORT_SYMBOL(cfs_waitq_add_exclusive_head);
 
index 411178c..95b55ce 100644 (file)
@@ -47,7 +47,7 @@ static unsigned int pages_factor[CFS_TCD_TYPE_MAX] = {
 
 char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
 
-cfs_rw_semaphore_t cfs_tracefile_sem;
+struct rw_semaphore cfs_tracefile_sem;
 
 int cfs_tracefile_init_arch()
 {
@@ -55,7 +55,7 @@ int cfs_tracefile_init_arch()
        int    j;
        struct cfs_trace_cpu_data *tcd;
 
-       cfs_init_rwsem(&cfs_tracefile_sem);
+       init_rwsem(&cfs_tracefile_sem);
 
        /* initialize trace_data */
        memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
@@ -70,7 +70,7 @@ int cfs_tracefile_init_arch()
 
        /* arch related info initialized */
        cfs_tcd_for_each(tcd, i, j) {
-               cfs_spin_lock_init(&tcd->tcd_lock);
+               spin_lock_init(&tcd->tcd_lock);
                tcd->tcd_pages_factor = pages_factor[i];
                tcd->tcd_type = i;
                tcd->tcd_cpu = j;
@@ -111,27 +111,27 @@ void cfs_tracefile_fini_arch()
                cfs_trace_data[i] = NULL;
        }
 
-       cfs_fini_rwsem(&cfs_tracefile_sem);
+       fini_rwsem(&cfs_tracefile_sem);
 }
 
 void cfs_tracefile_read_lock()
 {
-       cfs_down_read(&cfs_tracefile_sem);
+       down_read(&cfs_tracefile_sem);
 }
 
 void cfs_tracefile_read_unlock()
 {
-       cfs_up_read(&cfs_tracefile_sem);
+       up_read(&cfs_tracefile_sem);
 }
 
 void cfs_tracefile_write_lock()
 {
-       cfs_down_write(&cfs_tracefile_sem);
+       down_write(&cfs_tracefile_sem);
 }
 
 void cfs_tracefile_write_unlock()
 {
-       cfs_up_write(&cfs_tracefile_sem);
+       up_write(&cfs_tracefile_sem);
 }
 
 cfs_trace_buf_type_t cfs_trace_buf_idx_get()
@@ -153,28 +153,28 @@ cfs_trace_buf_type_t cfs_trace_buf_idx_get()
 int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
 {
        __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
-        if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
-                cfs_spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
-        else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
-                cfs_spin_lock_bh(&tcd->tcd_lock);
-        else if (unlikely(walking))
-                cfs_spin_lock_irq(&tcd->tcd_lock);
-        else
-                cfs_spin_lock(&tcd->tcd_lock);
+       if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
+               spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
+       else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
+               spin_lock_bh(&tcd->tcd_lock);
+       else if (unlikely(walking))
+               spin_lock_irq(&tcd->tcd_lock);
+       else
+               spin_lock(&tcd->tcd_lock);
        return 1;
 }
 
 void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
 {
        __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
-        if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
-                cfs_spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
-        else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
-                cfs_spin_unlock_bh(&tcd->tcd_lock);
-        else if (unlikely(walking))
-                cfs_spin_unlock_irq(&tcd->tcd_lock);
-        else
-                cfs_spin_unlock(&tcd->tcd_lock);
+       if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
+               spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
+       else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
+               spin_unlock_bh(&tcd->tcd_lock);
+       else if (unlikely(walking))
+               spin_unlock_irq(&tcd->tcd_lock);
+       else
+               spin_unlock(&tcd->tcd_lock);
 }
 
 int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
index b6cb94f..def2036 100644 (file)
@@ -183,19 +183,19 @@ static int libcfs_psdev_release(unsigned long flags, void *args)
         RETURN(0);
 }
 
-static cfs_rw_semaphore_t ioctl_list_sem;
+static struct rw_semaphore ioctl_list_sem;
 static cfs_list_t ioctl_list;
 
 int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand)
 {
         int rc = 0;
 
-        cfs_down_write(&ioctl_list_sem);
+       down_write(&ioctl_list_sem);
         if (!cfs_list_empty(&hand->item))
                 rc = -EBUSY;
         else
                 cfs_list_add_tail(&hand->item, &ioctl_list);
-        cfs_up_write(&ioctl_list_sem);
+       up_write(&ioctl_list_sem);
 
         return rc;
 }
@@ -205,12 +205,12 @@ int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand)
 {
         int rc = 0;
 
-        cfs_down_write(&ioctl_list_sem);
+       down_write(&ioctl_list_sem);
         if (cfs_list_empty(&hand->item))
                 rc = -ENOENT;
         else
                 cfs_list_del_init(&hand->item);
-        cfs_up_write(&ioctl_list_sem);
+       up_write(&ioctl_list_sem);
 
         return rc;
 }
@@ -305,7 +305,7 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile,unsigned long cmd,
         default: {
                 struct libcfs_ioctl_handler *hand;
                 err = -EINVAL;
-                cfs_down_read(&ioctl_list_sem);
+               down_read(&ioctl_list_sem);
                 cfs_list_for_each_entry_typed(hand, &ioctl_list,
                         struct libcfs_ioctl_handler, item) {
                         err = hand->handle_ioctl(cmd, data);
@@ -316,7 +316,7 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile,unsigned long cmd,
                                 break;
                         }
                 }
-                cfs_up_read(&ioctl_list_sem);
+               up_read(&ioctl_list_sem);
                 break;
         }
         }
@@ -365,8 +365,8 @@ MODULE_DESCRIPTION("Portals v3.1");
 MODULE_LICENSE("GPL");
 
 extern cfs_psdev_t libcfs_dev;
-extern cfs_rw_semaphore_t cfs_tracefile_sem;
-extern cfs_mutex_t cfs_trace_thread_mutex;
+extern struct rw_semaphore cfs_tracefile_sem;
+extern struct mutex cfs_trace_thread_mutex;
 extern struct cfs_wi_sched *cfs_sched_rehash;
 
 extern void libcfs_init_nidstrings(void);
@@ -379,9 +379,9 @@ static int init_libcfs_module(void)
 
        libcfs_arch_init();
        libcfs_init_nidstrings();
-       cfs_init_rwsem(&cfs_tracefile_sem);
-       cfs_mutex_init(&cfs_trace_thread_mutex);
-       cfs_init_rwsem(&ioctl_list_sem);
+       init_rwsem(&cfs_tracefile_sem);
+       mutex_init(&cfs_trace_thread_mutex);
+       init_rwsem(&ioctl_list_sem);
        CFS_INIT_LIST_HEAD(&ioctl_list);
        cfs_waitq_init(&cfs_race_waitq);
 
@@ -488,8 +488,8 @@ static void exit_libcfs_module(void)
                printk(CFS_KERN_ERR "LustreError: libcfs_debug_cleanup: %d\n",
                       rc);
 
-       cfs_fini_rwsem(&ioctl_list_sem);
-       cfs_fini_rwsem(&cfs_tracefile_sem);
+       fini_rwsem(&ioctl_list_sem);
+       fini_rwsem(&cfs_tracefile_sem);
 
        libcfs_arch_cleanup();
 }
index 4bcb858..5b2c478 100644 (file)
@@ -64,15 +64,15 @@ static char      libcfs_nidstrings[LNET_NIDSTR_COUNT][LNET_NIDSTR_SIZE];
 static int       libcfs_nidstring_idx = 0;
 
 #ifdef __KERNEL__
-static cfs_spinlock_t libcfs_nidstring_lock;
+static spinlock_t libcfs_nidstring_lock;
 
 void libcfs_init_nidstrings (void)
 {
-        cfs_spin_lock_init(&libcfs_nidstring_lock);
+       spin_lock_init(&libcfs_nidstring_lock);
 }
 
-# define NIDSTR_LOCK(f)   cfs_spin_lock_irqsave(&libcfs_nidstring_lock, f)
-# define NIDSTR_UNLOCK(f) cfs_spin_unlock_irqrestore(&libcfs_nidstring_lock, f)
+# define NIDSTR_LOCK(f)   spin_lock_irqsave(&libcfs_nidstring_lock, f)
+# define NIDSTR_UNLOCK(f) spin_unlock_irqrestore(&libcfs_nidstring_lock, f)
 #else
 # define NIDSTR_LOCK(f)   (f=sizeof(f))  /* avoid set-but-unused warnings */
 # define NIDSTR_UNLOCK(f) (f=sizeof(f))
index 115a70a..cdc218e 100644 (file)
@@ -50,7 +50,7 @@ union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[CFS_NR_CPUS] __cache
 char cfs_tracefile[TRACEFILE_NAME_SIZE];
 long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
 static struct tracefiled_ctl trace_tctl;
-cfs_mutex_t cfs_trace_thread_mutex;
+struct mutex cfs_trace_thread_mutex;
 static int thread_running = 0;
 
 cfs_atomic_t cfs_tage_allocated = CFS_ATOMIC_INIT(0);
@@ -198,7 +198,7 @@ static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
                        pgcount + 1, tcd->tcd_cur_pages);
 
         CFS_INIT_LIST_HEAD(&pc.pc_pages);
-        cfs_spin_lock_init(&pc.pc_lock);
+       spin_lock_init(&pc.pc_lock);
 
         cfs_list_for_each_entry_safe_typed(tage, tmp, &tcd->tcd_pages,
                                            struct cfs_trace_page, linkage) {
@@ -532,10 +532,10 @@ panic_collect_pages(struct page_collection *pc)
 
 static void collect_pages_on_all_cpus(struct page_collection *pc)
 {
-        struct cfs_trace_cpu_data *tcd;
-        int i, cpu;
+       struct cfs_trace_cpu_data *tcd;
+       int i, cpu;
 
-        cfs_spin_lock(&pc->pc_lock);
+       spin_lock(&pc->pc_lock);
         cfs_for_each_possible_cpu(cpu) {
                 cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                         cfs_list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
@@ -547,7 +547,7 @@ static void collect_pages_on_all_cpus(struct page_collection *pc)
                         }
                 }
         }
-        cfs_spin_unlock(&pc->pc_lock);
+       spin_unlock(&pc->pc_lock);
 }
 
 static void collect_pages(struct page_collection *pc)
@@ -568,7 +568,7 @@ static void put_pages_back_on_all_cpus(struct page_collection *pc)
         struct cfs_trace_page *tmp;
         int i, cpu;
 
-        cfs_spin_lock(&pc->pc_lock);
+       spin_lock(&pc->pc_lock);
         cfs_for_each_possible_cpu(cpu) {
                 cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                         cur_head = tcd->tcd_pages.next;
@@ -588,7 +588,7 @@ static void put_pages_back_on_all_cpus(struct page_collection *pc)
                         }
                 }
         }
-        cfs_spin_unlock(&pc->pc_lock);
+       spin_unlock(&pc->pc_lock);
 }
 
 static void put_pages_back(struct page_collection *pc)
@@ -602,12 +602,12 @@ static void put_pages_back(struct page_collection *pc)
  * if we have been steadily writing (and otherwise discarding) pages via the
  * debug daemon. */
 static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
-                                         struct cfs_trace_cpu_data *tcd)
+                                        struct cfs_trace_cpu_data *tcd)
 {
-        struct cfs_trace_page *tage;
-        struct cfs_trace_page *tmp;
+       struct cfs_trace_page *tage;
+       struct cfs_trace_page *tmp;
 
-        cfs_spin_lock(&pc->pc_lock);
+       spin_lock(&pc->pc_lock);
         cfs_list_for_each_entry_safe_typed(tage, tmp, &pc->pc_pages,
                                            struct cfs_trace_page, linkage) {
 
@@ -632,7 +632,7 @@ static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                         tcd->tcd_cur_daemon_pages--;
                 }
         }
-        cfs_spin_unlock(&pc->pc_lock);
+       spin_unlock(&pc->pc_lock);
 }
 
 static void put_pages_on_daemon_list(struct page_collection *pc)
@@ -648,11 +648,11 @@ static void put_pages_on_daemon_list(struct page_collection *pc)
 
 void cfs_trace_debug_print(void)
 {
-        struct page_collection pc;
-        struct cfs_trace_page *tage;
-        struct cfs_trace_page *tmp;
+       struct page_collection pc;
+       struct cfs_trace_page *tage;
+       struct cfs_trace_page *tmp;
 
-        cfs_spin_lock_init(&pc.pc_lock);
+       spin_lock_init(&pc.pc_lock);
 
         pc.pc_want_daemon_pages = 1;
         collect_pages(&pc);
@@ -708,7 +708,7 @@ int cfs_tracefile_dump_all_pages(char *filename)
                 goto out;
         }
 
-        cfs_spin_lock_init(&pc.pc_lock);
+       spin_lock_init(&pc.pc_lock);
         pc.pc_want_daemon_pages = 1;
         collect_pages(&pc);
         if (cfs_list_empty(&pc.pc_pages)) {
@@ -749,11 +749,11 @@ int cfs_tracefile_dump_all_pages(char *filename)
 
 void cfs_trace_flush_pages(void)
 {
-        struct page_collection pc;
-        struct cfs_trace_page *tage;
-        struct cfs_trace_page *tmp;
+       struct page_collection pc;
+       struct cfs_trace_page *tage;
+       struct cfs_trace_page *tmp;
 
-        cfs_spin_lock_init(&pc.pc_lock);
+       spin_lock_init(&pc.pc_lock);
 
         pc.pc_want_daemon_pages = 1;
         collect_pages(&pc);
@@ -1001,8 +1001,8 @@ static int tracefiled(void *arg)
         /* this is so broken in uml?  what on earth is going on? */
         cfs_daemonize("ktracefiled");
 
-        cfs_spin_lock_init(&pc.pc_lock);
-        cfs_complete(&tctl->tctl_start);
+       spin_lock_init(&pc.pc_lock);
+       complete(&tctl->tctl_start);
 
         while (1) {
                 cfs_waitlink_t __wait;
@@ -1095,7 +1095,7 @@ end_loop:
                                     cfs_time_seconds(1));
                 cfs_waitq_del(&tctl->tctl_waitq, &__wait);
         }
-        cfs_complete(&tctl->tctl_stop);
+       complete(&tctl->tctl_stop);
         return 0;
 }
 
@@ -1104,12 +1104,12 @@ int cfs_trace_start_thread(void)
         struct tracefiled_ctl *tctl = &trace_tctl;
         int rc = 0;
 
-        cfs_mutex_lock(&cfs_trace_thread_mutex);
+       mutex_lock(&cfs_trace_thread_mutex);
         if (thread_running)
                 goto out;
 
-        cfs_init_completion(&tctl->tctl_start);
-        cfs_init_completion(&tctl->tctl_stop);
+       init_completion(&tctl->tctl_start);
+       init_completion(&tctl->tctl_stop);
         cfs_waitq_init(&tctl->tctl_waitq);
         cfs_atomic_set(&tctl->tctl_shutdown, 0);
 
@@ -1118,10 +1118,10 @@ int cfs_trace_start_thread(void)
                 goto out;
         }
 
-        cfs_wait_for_completion(&tctl->tctl_start);
+       wait_for_completion(&tctl->tctl_start);
         thread_running = 1;
 out:
-        cfs_mutex_unlock(&cfs_trace_thread_mutex);
+       mutex_unlock(&cfs_trace_thread_mutex);
         return rc;
 }
 
@@ -1129,15 +1129,15 @@ void cfs_trace_stop_thread(void)
 {
         struct tracefiled_ctl *tctl = &trace_tctl;
 
-        cfs_mutex_lock(&cfs_trace_thread_mutex);
+       mutex_lock(&cfs_trace_thread_mutex);
         if (thread_running) {
                 printk(CFS_KERN_INFO
                        "Lustre: shutting down debug daemon thread...\n");
                 cfs_atomic_set(&tctl->tctl_shutdown, 1);
-                cfs_wait_for_completion(&tctl->tctl_stop);
+               wait_for_completion(&tctl->tctl_stop);
                 thread_running = 0;
         }
-        cfs_mutex_unlock(&cfs_trace_thread_mutex);
+       mutex_unlock(&cfs_trace_thread_mutex);
 }
 
 int cfs_tracefile_init(int max_pages)
@@ -1197,14 +1197,14 @@ static void trace_cleanup_on_all_cpus(void)
 
 static void cfs_trace_cleanup(void)
 {
-        struct page_collection pc;
+       struct page_collection pc;
 
-        CFS_INIT_LIST_HEAD(&pc.pc_pages);
-        cfs_spin_lock_init(&pc.pc_lock);
+       CFS_INIT_LIST_HEAD(&pc.pc_pages);
+       spin_lock_init(&pc.pc_lock);
 
-        trace_cleanup_on_all_cpus();
+       trace_cleanup_on_all_cpus();
 
-        cfs_tracefile_fini_arch();
+       cfs_tracefile_fini_arch();
 }
 
 void cfs_tracefile_exit(void)
index 5e7e9b1..b8124e2 100644 (file)
@@ -117,7 +117,7 @@ union cfs_trace_data_union {
                 * and trace_put_tcd, which are called in libcfs_debug_vmsg2 and
                 * tcd_for_each_type_lock
                 */
-               cfs_spinlock_t          tcd_lock;
+               spinlock_t              tcd_lock;
                unsigned long           tcd_lock_flags;
 
                /*
@@ -201,7 +201,7 @@ extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[CFS_NR_CPUS];
 /* XXX nikita: this declaration is internal to tracefile.c and should probably
  * be moved there */
 struct page_collection {
-       cfs_list_t              pc_pages;
+       cfs_list_t      pc_pages;
        /*
         * spin-lock protecting ->pc_pages. It is taken by smp_call_function()
         * call-back functions. XXX nikita: Which is horrible: all processors
@@ -209,23 +209,23 @@ struct page_collection {
         * lock. Probably ->pc_pages should be replaced with an array of
         * NR_CPUS elements accessed locklessly.
         */
-       cfs_spinlock_t          pc_lock;
+       spinlock_t      pc_lock;
        /*
         * if this flag is set, collect_pages() will spill both
         * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
         * only ->tcd_pages are spilled.
         */
-       int                     pc_want_daemon_pages;
+       int             pc_want_daemon_pages;
 };
 
 /* XXX nikita: this declaration is internal to tracefile.c and should probably
  * be moved there */
 struct tracefiled_ctl {
-       cfs_completion_t       tctl_start;
-       cfs_completion_t       tctl_stop;
-       cfs_waitq_t            tctl_waitq;
-       pid_t                  tctl_pid;
-       cfs_atomic_t           tctl_shutdown;
+       struct completion       tctl_start;
+       struct completion       tctl_stop;
+       cfs_waitq_t             tctl_waitq;
+       pid_t                   tctl_pid;
+       cfs_atomic_t            tctl_shutdown;
 };
 
 /*
index 83f8341..83b0185 100644 (file)
@@ -157,7 +157,7 @@ struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
         head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
 find_again:
         found = 0;
-        cfs_spin_lock(&cache->uc_lock);
+       spin_lock(&cache->uc_lock);
         cfs_list_for_each_entry_safe(entry, next, head, ue_hash) {
                 /* check invalid & expired items */
                 if (check_unlink_entry(cache, entry))
@@ -170,7 +170,7 @@ find_again:
 
         if (!found) {
                 if (!new) {
-                        cfs_spin_unlock(&cache->uc_lock);
+                       spin_unlock(&cache->uc_lock);
                         new = alloc_entry(cache, key, args);
                         if (!new) {
                                 CERROR("fail to alloc entry\n");
@@ -194,9 +194,9 @@ find_again:
         if (UC_CACHE_IS_NEW(entry)) {
                 UC_CACHE_SET_ACQUIRING(entry);
                 UC_CACHE_CLEAR_NEW(entry);
-                cfs_spin_unlock(&cache->uc_lock);
-                rc = refresh_entry(cache, entry);
-                cfs_spin_lock(&cache->uc_lock);
+               spin_unlock(&cache->uc_lock);
+               rc = refresh_entry(cache, entry);
+               spin_lock(&cache->uc_lock);
                 entry->ue_acquire_expire =
                         cfs_time_shift(cache->uc_acquire_expire);
                 if (rc < 0) {
@@ -220,12 +220,12 @@ find_again:
                 cfs_waitlink_init(&wait);
                 cfs_waitq_add(&entry->ue_waitq, &wait);
                 cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-                cfs_spin_unlock(&cache->uc_lock);
+               spin_unlock(&cache->uc_lock);
 
-                left = cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
-                                           expiry);
+               left = cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
+                                          expiry);
 
-                cfs_spin_lock(&cache->uc_lock);
+               spin_lock(&cache->uc_lock);
                 cfs_waitq_del(&entry->ue_waitq, &wait);
                 if (UC_CACHE_IS_ACQUIRING(entry)) {
                         /* we're interrupted or upcall failed in the middle */
@@ -253,36 +253,36 @@ find_again:
                  * without any error, should at least give a
                  * chance to use it once.
                  */
-                if (entry != new) {
-                        put_entry(cache, entry);
-                        cfs_spin_unlock(&cache->uc_lock);
-                        new = NULL;
-                        goto find_again;
-                }
-        }
+               if (entry != new) {
+                       put_entry(cache, entry);
+                       spin_unlock(&cache->uc_lock);
+                       new = NULL;
+                       goto find_again;
+               }
+       }
 
         /* Now we know it's good */
 out:
-        cfs_spin_unlock(&cache->uc_lock);
-        RETURN(entry);
+       spin_unlock(&cache->uc_lock);
+       RETURN(entry);
 }
 EXPORT_SYMBOL(upcall_cache_get_entry);
 
 void upcall_cache_put_entry(struct upcall_cache *cache,
                             struct upcall_cache_entry *entry)
 {
-        ENTRY;
-
-        if (!entry) {
-                EXIT;
-                return;
-        }
-
-        LASSERT(cfs_atomic_read(&entry->ue_refcount) > 0);
-        cfs_spin_lock(&cache->uc_lock);
-        put_entry(cache, entry);
-        cfs_spin_unlock(&cache->uc_lock);
-        EXIT;
+       ENTRY;
+
+       if (!entry) {
+               EXIT;
+               return;
+       }
+
+       LASSERT(cfs_atomic_read(&entry->ue_refcount) > 0);
+       spin_lock(&cache->uc_lock);
+       put_entry(cache, entry);
+       spin_unlock(&cache->uc_lock);
+       EXIT;
 }
 EXPORT_SYMBOL(upcall_cache_put_entry);
 
@@ -298,7 +298,7 @@ int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
 
         head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
 
-        cfs_spin_lock(&cache->uc_lock);
+       spin_lock(&cache->uc_lock);
         cfs_list_for_each_entry(entry, head, ue_hash) {
                 if (downcall_compare(cache, entry, key, args) == 0) {
                         found = 1;
@@ -311,7 +311,7 @@ int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
                 CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n",
                        cache->uc_name, key);
                 /* haven't found, it's possible */
-                cfs_spin_unlock(&cache->uc_lock);
+               spin_unlock(&cache->uc_lock);
                 RETURN(-EINVAL);
         }
 
@@ -333,10 +333,10 @@ int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
                 GOTO(out, rc = -EINVAL);
         }
 
-        cfs_spin_unlock(&cache->uc_lock);
-        if (cache->uc_ops->parse_downcall)
-                rc = cache->uc_ops->parse_downcall(cache, entry, args);
-        cfs_spin_lock(&cache->uc_lock);
+       spin_unlock(&cache->uc_lock);
+       if (cache->uc_ops->parse_downcall)
+               rc = cache->uc_ops->parse_downcall(cache, entry, args);
+       spin_lock(&cache->uc_lock);
         if (rc)
                 GOTO(out, rc);
 
@@ -350,21 +350,21 @@ out:
                 cfs_list_del_init(&entry->ue_hash);
         }
         UC_CACHE_CLEAR_ACQUIRING(entry);
-        cfs_spin_unlock(&cache->uc_lock);
-        cfs_waitq_broadcast(&entry->ue_waitq);
-        put_entry(cache, entry);
+       spin_unlock(&cache->uc_lock);
+       cfs_waitq_broadcast(&entry->ue_waitq);
+       put_entry(cache, entry);
 
-        RETURN(rc);
+       RETURN(rc);
 }
 EXPORT_SYMBOL(upcall_cache_downcall);
 
 static void cache_flush(struct upcall_cache *cache, int force)
 {
-        struct upcall_cache_entry *entry, *next;
-        int i;
-        ENTRY;
+       struct upcall_cache_entry *entry, *next;
+       int i;
+       ENTRY;
 
-        cfs_spin_lock(&cache->uc_lock);
+       spin_lock(&cache->uc_lock);
         for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
                 cfs_list_for_each_entry_safe(entry, next,
                                          &cache->uc_hashtable[i], ue_hash) {
@@ -376,8 +376,8 @@ static void cache_flush(struct upcall_cache *cache, int force)
                         free_entry(cache, entry);
                 }
         }
-        cfs_spin_unlock(&cache->uc_lock);
-        EXIT;
+       spin_unlock(&cache->uc_lock);
+       EXIT;
 }
 
 void upcall_cache_flush_idle(struct upcall_cache *cache)
@@ -401,7 +401,7 @@ void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
 
         head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
 
-        cfs_spin_lock(&cache->uc_lock);
+       spin_lock(&cache->uc_lock);
         cfs_list_for_each_entry(entry, head, ue_hash) {
                 if (upcall_compare(cache, entry, key, args) == 0) {
                         found = 1;
@@ -420,7 +420,7 @@ void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
                 if (!cfs_atomic_read(&entry->ue_refcount))
                         free_entry(cache, entry);
         }
-        cfs_spin_unlock(&cache->uc_lock);
+       spin_unlock(&cache->uc_lock);
 }
 EXPORT_SYMBOL(upcall_cache_flush_one);
 
@@ -435,8 +435,8 @@ struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
         if (!cache)
                 RETURN(ERR_PTR(-ENOMEM));
 
-        cfs_spin_lock_init(&cache->uc_lock);
-        cfs_rwlock_init(&cache->uc_upcall_rwlock);
+       spin_lock_init(&cache->uc_lock);
+       rwlock_init(&cache->uc_upcall_rwlock);
         for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
                 CFS_INIT_LIST_HEAD(&cache->uc_hashtable[i]);
         strncpy(cache->uc_name, name, sizeof(cache->uc_name) - 1);
index 3ac285c..e5031f6 100644 (file)
@@ -38,7 +38,7 @@
 
 #define OFF_BY_START(start)     ((start)/BITS_PER_LONG)
 
-unsigned long cfs_find_next_bit(unsigned long *addr,
+unsigned long find_next_bit(unsigned long *addr,
                                 unsigned long size, unsigned long offset)
 {
         unsigned long *word, *last;
@@ -72,7 +72,7 @@ found:
         return base + bit;
 }
 
-unsigned long cfs_find_next_zero_bit(unsigned long *addr,
+unsigned long find_next_zero_bit(unsigned long *addr,
                                      unsigned long size, unsigned long offset)
 {
         unsigned long *word, *last;
index df9b2fc..39ac628 100644 (file)
  * No-op implementation.
  */
 
-void cfs_spin_lock_init(cfs_spinlock_t *lock)
+void spin_lock_init(spinlock_t *lock)
 {
-        LASSERT(lock != NULL);
-        (void)lock;
+       LASSERT(lock != NULL);
+       (void)lock;
 }
 
-void cfs_spin_lock(cfs_spinlock_t *lock)
+void spin_lock(spinlock_t *lock)
 {
-        (void)lock;
+       (void)lock;
 }
 
-void cfs_spin_unlock(cfs_spinlock_t *lock)
+void spin_unlock(spinlock_t *lock)
 {
-        (void)lock;
+       (void)lock;
 }
 
-int cfs_spin_trylock(cfs_spinlock_t *lock)
+int spin_trylock(spinlock_t *lock)
 {
-        (void)lock;
+       (void)lock;
        return 1;
 }
 
-void cfs_spin_lock_bh_init(cfs_spinlock_t *lock)
+void spin_lock_bh_init(spinlock_t *lock)
 {
-        LASSERT(lock != NULL);
-        (void)lock;
+       LASSERT(lock != NULL);
+       (void)lock;
 }
 
-void cfs_spin_lock_bh(cfs_spinlock_t *lock)
+void spin_lock_bh(spinlock_t *lock)
 {
-        LASSERT(lock != NULL);
-        (void)lock;
+       LASSERT(lock != NULL);
+       (void)lock;
 }
 
-void cfs_spin_unlock_bh(cfs_spinlock_t *lock)
+void spin_unlock_bh(spinlock_t *lock)
 {
-        LASSERT(lock != NULL);
-        (void)lock;
+       LASSERT(lock != NULL);
+       (void)lock;
 }
 
 /*
@@ -117,30 +117,30 @@ void cfs_spin_unlock_bh(cfs_spinlock_t *lock)
  * - __up(x)
  */
 
-void cfs_sema_init(cfs_semaphore_t *s, int val)
+void sema_init(struct semaphore *s, int val)
 {
-        LASSERT(s != NULL);
-        (void)s;
-        (void)val;
+       LASSERT(s != NULL);
+       (void)s;
+       (void)val;
 }
 
-void __down(cfs_semaphore_t *s)
+void __down(struct semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
+       LASSERT(s != NULL);
+       (void)s;
 }
 
-int __down_interruptible(cfs_semaphore_t *s)
+int __down_interruptible(struct semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
-        return 0;
+       LASSERT(s != NULL);
+       (void)s;
+       return 0;
 }
 
-void __up(cfs_semaphore_t *s)
+void __up(struct semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
+       LASSERT(s != NULL);
+       (void)s;
 }
 
 
@@ -152,51 +152,51 @@ void __up(cfs_semaphore_t *s)
  * - wait_for_completion(c)
  */
 
-static cfs_wait_handler_t wait_handler;
+static wait_handler_t wait_handler;
 
-void cfs_init_completion_module(cfs_wait_handler_t handler)
+void init_completion_module(wait_handler_t handler)
 {
-        wait_handler = handler;
+       wait_handler = handler;
 }
 
-int cfs_call_wait_handler(int timeout)
+int call_wait_handler(int timeout)
 {
-        if (!wait_handler)
-                return -ENOSYS;
-        return wait_handler(timeout);
+       if (!wait_handler)
+               return -ENOSYS;
+       return wait_handler(timeout);
 }
 
-void cfs_init_completion(cfs_completion_t *c)
+void init_completion(struct completion *c)
 {
-        LASSERT(c != NULL);
-        c->done = 0;
-        cfs_waitq_init(&c->wait);
+       LASSERT(c != NULL);
+       c->done = 0;
+       cfs_waitq_init(&c->wait);
 }
 
-void cfs_complete(cfs_completion_t *c)
+void complete(struct completion *c)
 {
-        LASSERT(c != NULL);
-        c->done  = 1;
-        cfs_waitq_signal(&c->wait);
+       LASSERT(c != NULL);
+       c->done  = 1;
+       cfs_waitq_signal(&c->wait);
 }
 
-void cfs_wait_for_completion(cfs_completion_t *c)
+void wait_for_completion(struct completion *c)
 {
-        LASSERT(c != NULL);
-        do {
-                if (cfs_call_wait_handler(1000) < 0)
-                        break;
-        } while (c->done == 0);
+       LASSERT(c != NULL);
+       do {
+               if (call_wait_handler(1000) < 0)
+                       break;
+       } while (c->done == 0);
 }
 
-int cfs_wait_for_completion_interruptible(cfs_completion_t *c)
+int wait_for_completion_interruptible(struct completion *c)
 {
-        LASSERT(c != NULL);
-        do {
-                if (cfs_call_wait_handler(1000) < 0)
-                        break;
-        } while (c->done == 0);
-        return 0;
+       LASSERT(c != NULL);
+       do {
+               if (call_wait_handler(1000) < 0)
+                       break;
+       } while (c->done == 0);
+       return 0;
 }
 
 /*
@@ -210,54 +210,54 @@ int cfs_wait_for_completion_interruptible(cfs_completion_t *c)
  * - up_write(x)
  */
 
-void cfs_init_rwsem(cfs_rw_semaphore_t *s)
+void init_rwsem(struct rw_semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
+       LASSERT(s != NULL);
+       (void)s;
 }
 
-void cfs_down_read(cfs_rw_semaphore_t *s)
+void down_read(struct rw_semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
+       LASSERT(s != NULL);
+       (void)s;
 }
 
-int cfs_down_read_trylock(cfs_rw_semaphore_t *s)
+int down_read_trylock(struct rw_semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
+       LASSERT(s != NULL);
+       (void)s;
        return 1;
 }
 
-void cfs_down_write(cfs_rw_semaphore_t *s)
+void down_write(struct rw_semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
+       LASSERT(s != NULL);
+       (void)s;
 }
 
-int cfs_down_write_trylock(cfs_rw_semaphore_t *s)
+int down_write_trylock(struct rw_semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
+       LASSERT(s != NULL);
+       (void)s;
        return 1;
 }
 
-void cfs_up_read(cfs_rw_semaphore_t *s)
+void up_read(struct rw_semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
+       LASSERT(s != NULL);
+       (void)s;
 }
 
-void cfs_up_write(cfs_rw_semaphore_t *s)
+void up_write(struct rw_semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
+       LASSERT(s != NULL);
+       (void)s;
 }
 
-void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
+void fini_rwsem(struct rw_semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
+       LASSERT(s != NULL);
+       (void)s;
 }
 
 #ifdef HAVE_LIBPTHREAD
@@ -266,7 +266,7 @@ void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
  * Multi-threaded user space completion
  */
 
-void cfs_mt_init_completion(cfs_mt_completion_t *c)
+void mt_init_completion(mt_completion_t *c)
 {
         LASSERT(c != NULL);
         c->c_done = 0;
@@ -274,14 +274,14 @@ void cfs_mt_init_completion(cfs_mt_completion_t *c)
         pthread_cond_init(&c->c_cond, NULL);
 }
 
-void cfs_mt_fini_completion(cfs_mt_completion_t *c)
+void mt_fini_completion(mt_completion_t *c)
 {
         LASSERT(c != NULL);
         pthread_mutex_destroy(&c->c_mut);
         pthread_cond_destroy(&c->c_cond);
 }
 
-void cfs_mt_complete(cfs_mt_completion_t *c)
+void mt_complete(mt_completion_t *c)
 {
         LASSERT(c != NULL);
         pthread_mutex_lock(&c->c_mut);
@@ -290,7 +290,7 @@ void cfs_mt_complete(cfs_mt_completion_t *c)
         pthread_mutex_unlock(&c->c_mut);
 }
 
-void cfs_mt_wait_for_completion(cfs_mt_completion_t *c)
+void mt_wait_for_completion(mt_completion_t *c)
 {
         LASSERT(c != NULL);
         pthread_mutex_lock(&c->c_mut);
@@ -306,7 +306,7 @@ void cfs_mt_wait_for_completion(cfs_mt_completion_t *c)
 
 static pthread_mutex_t atomic_guard_lock = PTHREAD_MUTEX_INITIALIZER;
 
-int cfs_mt_atomic_read(cfs_mt_atomic_t *a)
+int mt_atomic_read(mt_atomic_t *a)
 {
         int r;
 
@@ -316,14 +316,14 @@ int cfs_mt_atomic_read(cfs_mt_atomic_t *a)
         return r;
 }
 
-void cfs_mt_atomic_set(cfs_mt_atomic_t *a, int b)
+void mt_atomic_set(mt_atomic_t *a, int b)
 {
         pthread_mutex_lock(&atomic_guard_lock);
         a->counter = b;
         pthread_mutex_unlock(&atomic_guard_lock);
 }
 
-int cfs_mt_atomic_dec_and_test(cfs_mt_atomic_t *a)
+int mt_atomic_dec_and_test(mt_atomic_t *a)
 {
         int r;
 
@@ -333,20 +333,20 @@ int cfs_mt_atomic_dec_and_test(cfs_mt_atomic_t *a)
         return (r == 0);
 }
 
-void cfs_mt_atomic_inc(cfs_mt_atomic_t *a)
+void mt_atomic_inc(mt_atomic_t *a)
 {
         pthread_mutex_lock(&atomic_guard_lock);
         ++a->counter;
         pthread_mutex_unlock(&atomic_guard_lock);
 }
 
-void cfs_mt_atomic_dec(cfs_mt_atomic_t *a)
+void mt_atomic_dec(mt_atomic_t *a)
 {
         pthread_mutex_lock(&atomic_guard_lock);
         --a->counter;
         pthread_mutex_unlock(&atomic_guard_lock);
 }
-void cfs_mt_atomic_add(int b, cfs_mt_atomic_t *a)
+void mt_atomic_add(int b, mt_atomic_t *a)
 
 {
         pthread_mutex_lock(&atomic_guard_lock);
@@ -354,7 +354,7 @@ void cfs_mt_atomic_add(int b, cfs_mt_atomic_t *a)
         pthread_mutex_unlock(&atomic_guard_lock);
 }
 
-void cfs_mt_atomic_sub(int b, cfs_mt_atomic_t *a)
+void mt_atomic_sub(int b, mt_atomic_t *a)
 {
         pthread_mutex_lock(&atomic_guard_lock);
         a->counter -= b;
index 0cc1e3d..2c03cb7 100644 (file)
@@ -125,7 +125,7 @@ void cfs_waitq_wait(struct cfs_waitlink *link, cfs_task_state_t state)
         (void)link;
 
         /* well, wait for something to happen */
-        cfs_call_wait_handler(0);
+       call_wait_handler(0);
 }
 
 int64_t cfs_waitq_timedwait(struct cfs_waitlink *link, cfs_task_state_t state,
@@ -133,7 +133,7 @@ int64_t cfs_waitq_timedwait(struct cfs_waitlink *link, cfs_task_state_t state,
 {
         LASSERT(link != NULL);
         (void)link;
-        cfs_call_wait_handler(timeout);
+       call_wait_handler(timeout);
         return 0;
 }
 
index a394794..eb1125d 100644 (file)
@@ -42,7 +42,7 @@
 #include "tracefile.h"
 
 struct lc_watchdog {
-        cfs_spinlock_t  lcw_lock;     /* check or change lcw_list */
+       spinlock_t  lcw_lock;     /* check or change lcw_list */
         int             lcw_refcount; /* must hold lcw_pending_timers_lock */
         cfs_timer_t     lcw_timer;    /* kernel timer */
         cfs_list_t      lcw_list;     /* chain on pending list */
@@ -66,8 +66,8 @@ struct lc_watchdog {
  * and lcw_stop_completion when it exits.
  * Wake lcw_event_waitq to signal timer callback dispatches.
  */
-static cfs_completion_t lcw_start_completion;
-static cfs_completion_t  lcw_stop_completion;
+static struct completion lcw_start_completion;
+static struct completion  lcw_stop_completion;
 static cfs_waitq_t lcw_event_waitq;
 
 /*
@@ -84,7 +84,7 @@ static unsigned long lcw_flags = 0;
  * When it hits 0, we stop the dispatcher.
  */
 static __u32         lcw_refcount = 0;
-static CFS_DEFINE_MUTEX(lcw_refcount_mutex);
+static DEFINE_MUTEX(lcw_refcount_mutex);
 
 /*
  * List of timers that have fired that need their callbacks run by the
@@ -103,7 +103,7 @@ lcw_dump(struct lc_watchdog *lcw)
 {
         ENTRY;
 #if defined(HAVE_TASKLIST_LOCK)
-        cfs_read_lock(&tasklist_lock);
+       read_lock(&tasklist_lock);
 #else
         rcu_read_lock();
 #endif
@@ -116,7 +116,7 @@ lcw_dump(struct lc_watchdog *lcw)
         }
 
 #if defined(HAVE_TASKLIST_LOCK)
-        cfs_read_unlock(&tasklist_lock);
+       read_unlock(&tasklist_lock);
 #else
         rcu_read_unlock();
 #endif
@@ -135,30 +135,30 @@ static void lcw_cb(ulong_ptr_t data)
 
         lcw->lcw_state = LC_WATCHDOG_EXPIRED;
 
-        cfs_spin_lock_bh(&lcw->lcw_lock);
-        LASSERT(cfs_list_empty(&lcw->lcw_list));
+       spin_lock_bh(&lcw->lcw_lock);
+       LASSERT(cfs_list_empty(&lcw->lcw_list));
 
-        cfs_spin_lock_bh(&lcw_pending_timers_lock);
-        lcw->lcw_refcount++; /* +1 for pending list */
-        cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
-        cfs_waitq_signal(&lcw_event_waitq);
+       spin_lock_bh(&lcw_pending_timers_lock);
+       lcw->lcw_refcount++; /* +1 for pending list */
+       cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
+       cfs_waitq_signal(&lcw_event_waitq);
 
-        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
-        cfs_spin_unlock_bh(&lcw->lcw_lock);
-        EXIT;
+       spin_unlock_bh(&lcw_pending_timers_lock);
+       spin_unlock_bh(&lcw->lcw_lock);
+       EXIT;
 }
 
 static int is_watchdog_fired(void)
 {
-        int rc;
+       int rc;
 
-        if (cfs_test_bit(LCW_FLAG_STOP, &lcw_flags))
-                return 1;
+       if (test_bit(LCW_FLAG_STOP, &lcw_flags))
+               return 1;
 
-        cfs_spin_lock_bh(&lcw_pending_timers_lock);
-        rc = !cfs_list_empty(&lcw_pending_timers);
-        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
-        return rc;
+       spin_lock_bh(&lcw_pending_timers_lock);
+       rc = !cfs_list_empty(&lcw_pending_timers);
+       spin_unlock_bh(&lcw_pending_timers_lock);
+       return rc;
 }
 
 static void lcw_dump_stack(struct lc_watchdog *lcw)
@@ -224,7 +224,7 @@ static int lcw_dispatch_main(void *data)
         RECALC_SIGPENDING;
         SIGNAL_MASK_UNLOCK(current, flags);
 
-        cfs_complete(&lcw_start_completion);
+       complete(&lcw_start_completion);
 
         while (1) {
                 int dumplog = 1;
@@ -232,20 +232,20 @@ static int lcw_dispatch_main(void *data)
                 cfs_wait_event_interruptible(lcw_event_waitq,
                                              is_watchdog_fired(), rc);
                 CDEBUG(D_INFO, "Watchdog got woken up...\n");
-                if (cfs_test_bit(LCW_FLAG_STOP, &lcw_flags)) {
-                        CDEBUG(D_INFO, "LCW_FLAG_STOP was set, shutting down...\n");
-
-                        cfs_spin_lock_bh(&lcw_pending_timers_lock);
-                        rc = !cfs_list_empty(&lcw_pending_timers);
-                        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
-                        if (rc) {
-                                CERROR("pending timers list was not empty at "
-                                       "time of watchdog dispatch shutdown\n");
-                        }
-                        break;
-                }
-
-                cfs_spin_lock_bh(&lcw_pending_timers_lock);
+               if (test_bit(LCW_FLAG_STOP, &lcw_flags)) {
+                       CDEBUG(D_INFO, "LCW_FLAG_STOP set, shutting down...\n");
+
+                       spin_lock_bh(&lcw_pending_timers_lock);
+                       rc = !cfs_list_empty(&lcw_pending_timers);
+                       spin_unlock_bh(&lcw_pending_timers_lock);
+                       if (rc) {
+                               CERROR("pending timers list was not empty at "
+                                      "time of watchdog dispatch shutdown\n");
+                       }
+                       break;
+               }
+
+               spin_lock_bh(&lcw_pending_timers_lock);
                 while (!cfs_list_empty(&lcw_pending_timers)) {
                         int is_dumplog;
 
@@ -254,18 +254,18 @@ static int lcw_dispatch_main(void *data)
                         /* +1 ref for callback to make sure lwc wouldn't be
                          * deleted after releasing lcw_pending_timers_lock */
                         lcw->lcw_refcount++;
-                        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
-
-                        /* lock ordering */
-                        cfs_spin_lock_bh(&lcw->lcw_lock);
-                        cfs_spin_lock_bh(&lcw_pending_timers_lock);
-
-                        if (cfs_list_empty(&lcw->lcw_list)) {
-                                /* already removed from pending list */
-                                lcw->lcw_refcount--; /* -1 ref for callback */
-                                if (lcw->lcw_refcount == 0)
-                                        cfs_list_add(&lcw->lcw_list, &zombies);
-                                cfs_spin_unlock_bh(&lcw->lcw_lock);
+                       spin_unlock_bh(&lcw_pending_timers_lock);
+
+                       /* lock ordering */
+                       spin_lock_bh(&lcw->lcw_lock);
+                       spin_lock_bh(&lcw_pending_timers_lock);
+
+                       if (cfs_list_empty(&lcw->lcw_list)) {
+                               /* already removed from pending list */
+                               lcw->lcw_refcount--; /* -1 ref for callback */
+                               if (lcw->lcw_refcount == 0)
+                                       cfs_list_add(&lcw->lcw_list, &zombies);
+                               spin_unlock_bh(&lcw->lcw_lock);
                                 /* still hold lcw_pending_timers_lock */
                                 continue;
                         }
@@ -273,8 +273,8 @@ static int lcw_dispatch_main(void *data)
                         cfs_list_del_init(&lcw->lcw_list);
                         lcw->lcw_refcount--; /* -1 ref for pending list */
 
-                        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
-                        cfs_spin_unlock_bh(&lcw->lcw_lock);
+                       spin_unlock_bh(&lcw_pending_timers_lock);
+                       spin_unlock_bh(&lcw->lcw_lock);
 
                         CDEBUG(D_INFO, "found lcw for pid " LPPID "\n",
                                lcw->lcw_pid);
@@ -288,12 +288,12 @@ static int lcw_dispatch_main(void *data)
                                         dumplog = 0;
                         }
 
-                        cfs_spin_lock_bh(&lcw_pending_timers_lock);
-                        lcw->lcw_refcount--; /* -1 ref for callback */
-                        if (lcw->lcw_refcount == 0)
-                                cfs_list_add(&lcw->lcw_list, &zombies);
-                }
-                cfs_spin_unlock_bh(&lcw_pending_timers_lock);
+                       spin_lock_bh(&lcw_pending_timers_lock);
+                       lcw->lcw_refcount--; /* -1 ref for callback */
+                       if (lcw->lcw_refcount == 0)
+                               cfs_list_add(&lcw->lcw_list, &zombies);
+               }
+               spin_unlock_bh(&lcw_pending_timers_lock);
 
                 while (!cfs_list_empty(&zombies)) {
                         lcw = cfs_list_entry(lcw_pending_timers.next,
@@ -303,20 +303,20 @@ static int lcw_dispatch_main(void *data)
                 }
         }
 
-        cfs_complete(&lcw_stop_completion);
+       complete(&lcw_stop_completion);
 
-        RETURN(rc);
+       RETURN(rc);
 }
 
 static void lcw_dispatch_start(void)
 {
-        int rc;
+       int rc;
 
-        ENTRY;
-        LASSERT(lcw_refcount == 1);
+       ENTRY;
+       LASSERT(lcw_refcount == 1);
 
-        cfs_init_completion(&lcw_stop_completion);
-        cfs_init_completion(&lcw_start_completion);
+       init_completion(&lcw_stop_completion);
+       init_completion(&lcw_start_completion);
         cfs_waitq_init(&lcw_event_waitq);
 
         CDEBUG(D_INFO, "starting dispatch thread\n");
@@ -326,27 +326,27 @@ static void lcw_dispatch_start(void)
                 EXIT;
                 return;
         }
-        cfs_wait_for_completion(&lcw_start_completion);
-        CDEBUG(D_INFO, "watchdog dispatcher initialization complete.\n");
+       wait_for_completion(&lcw_start_completion);
+       CDEBUG(D_INFO, "watchdog dispatcher initialization complete.\n");
 
-        EXIT;
+       EXIT;
 }
 
 static void lcw_dispatch_stop(void)
 {
-        ENTRY;
-        LASSERT(lcw_refcount == 0);
+       ENTRY;
+       LASSERT(lcw_refcount == 0);
 
-        CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n");
+       CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n");
 
-        cfs_set_bit(LCW_FLAG_STOP, &lcw_flags);
-        cfs_waitq_signal(&lcw_event_waitq);
+       set_bit(LCW_FLAG_STOP, &lcw_flags);
+       cfs_waitq_signal(&lcw_event_waitq);
 
-        cfs_wait_for_completion(&lcw_stop_completion);
+       wait_for_completion(&lcw_stop_completion);
 
-        CDEBUG(D_INFO, "watchdog dispatcher has shut down.\n");
+       CDEBUG(D_INFO, "watchdog dispatcher has shut down.\n");
 
-        EXIT;
+       EXIT;
 }
 
 struct lc_watchdog *lc_watchdog_add(int timeout,
@@ -362,7 +362,7 @@ struct lc_watchdog *lc_watchdog_add(int timeout,
                 RETURN(ERR_PTR(-ENOMEM));
         }
 
-        cfs_spin_lock_init(&lcw->lcw_lock);
+       spin_lock_init(&lcw->lcw_lock);
         lcw->lcw_refcount = 1; /* refcount for owner */
         lcw->lcw_task     = cfs_current();
         lcw->lcw_pid      = cfs_curproc_pid();
@@ -373,10 +373,10 @@ struct lc_watchdog *lc_watchdog_add(int timeout,
         CFS_INIT_LIST_HEAD(&lcw->lcw_list);
         cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);
 
-        cfs_mutex_lock(&lcw_refcount_mutex);
-        if (++lcw_refcount == 1)
-                lcw_dispatch_start();
-        cfs_mutex_unlock(&lcw_refcount_mutex);
+       mutex_lock(&lcw_refcount_mutex);
+       if (++lcw_refcount == 1)
+               lcw_dispatch_start();
+       mutex_unlock(&lcw_refcount_mutex);
 
         /* Keep this working in case we enable them by default */
         if (lcw->lcw_state == LC_WATCHDOG_ENABLED) {
@@ -413,15 +413,15 @@ static void lcw_update_time(struct lc_watchdog *lcw, const char *message)
 
 static void lc_watchdog_del_pending(struct lc_watchdog *lcw)
 {
-        cfs_spin_lock_bh(&lcw->lcw_lock);
-        if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
-                cfs_spin_lock_bh(&lcw_pending_timers_lock);
-                cfs_list_del_init(&lcw->lcw_list);
-                lcw->lcw_refcount--; /* -1 ref for pending list */
-                cfs_spin_unlock_bh(&lcw_pending_timers_lock);
-        }
-
-        cfs_spin_unlock_bh(&lcw->lcw_lock);
+       spin_lock_bh(&lcw->lcw_lock);
+       if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
+               spin_lock_bh(&lcw_pending_timers_lock);
+               cfs_list_del_init(&lcw->lcw_list);
+               lcw->lcw_refcount--; /* -1 ref for pending list */
+               spin_unlock_bh(&lcw_pending_timers_lock);
+       }
+
+       spin_unlock_bh(&lcw->lcw_lock);
 }
 
 void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout)
@@ -466,27 +466,27 @@ void lc_watchdog_delete(struct lc_watchdog *lcw)
 
         lcw_update_time(lcw, "stopped");
 
-        cfs_spin_lock_bh(&lcw->lcw_lock);
-        cfs_spin_lock_bh(&lcw_pending_timers_lock);
-        if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
-                cfs_list_del_init(&lcw->lcw_list);
-                lcw->lcw_refcount--; /* -1 ref for pending list */
-        }
+       spin_lock_bh(&lcw->lcw_lock);
+       spin_lock_bh(&lcw_pending_timers_lock);
+       if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
+               cfs_list_del_init(&lcw->lcw_list);
+               lcw->lcw_refcount--; /* -1 ref for pending list */
+       }
 
-        lcw->lcw_refcount--; /* -1 ref for owner */
-        dead = lcw->lcw_refcount == 0;
-        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
-        cfs_spin_unlock_bh(&lcw->lcw_lock);
+       lcw->lcw_refcount--; /* -1 ref for owner */
+       dead = lcw->lcw_refcount == 0;
+       spin_unlock_bh(&lcw_pending_timers_lock);
+       spin_unlock_bh(&lcw->lcw_lock);
 
-        if (dead)
-                LIBCFS_FREE(lcw, sizeof(*lcw));
+       if (dead)
+               LIBCFS_FREE(lcw, sizeof(*lcw));
 
-        cfs_mutex_lock(&lcw_refcount_mutex);
-        if (--lcw_refcount == 0)
-                lcw_dispatch_stop();
-        cfs_mutex_unlock(&lcw_refcount_mutex);
+       mutex_lock(&lcw_refcount_mutex);
+       if (--lcw_refcount == 0)
+               lcw_dispatch_stop();
+       mutex_unlock(&lcw_refcount_mutex);
 
-        EXIT;
+       EXIT;
 }
 EXPORT_SYMBOL(lc_watchdog_delete);
 
index 16b47b8..68d1740 100644 (file)
@@ -204,7 +204,7 @@ task_manager_notify(
     PLIST_ENTRY ListEntry = NULL; 
     PTASK_SLOT  TaskSlot  = NULL;
 
-    cfs_spin_lock(&(cfs_win_task_manger.Lock));
+       spin_lock(&(cfs_win_task_manger.Lock));
 
     ListEntry = cfs_win_task_manger.TaskList.Flink;
     while (ListEntry != (&(cfs_win_task_manger.TaskList))) {
@@ -226,7 +226,7 @@ task_manager_notify(
         ListEntry = ListEntry->Flink;
     }
 
-    cfs_spin_unlock(&(cfs_win_task_manger.Lock));
+       spin_unlock(&(cfs_win_task_manger.Lock));
 }
 
 int
@@ -239,7 +239,7 @@ init_task_manager()
     cfs_win_task_manger.Magic = TASKMAN_MAGIC;
 
     /* initialize the spinlock protection */
-    cfs_spin_lock_init(&cfs_win_task_manger.Lock);
+       spin_lock_init(&cfs_win_task_manger.Lock);
 
     /* create slab memory cache */
     cfs_win_task_manger.slab = cfs_mem_cache_create(
@@ -285,7 +285,7 @@ cleanup_task_manager()
     }
 
     /* cleanup all the taskslots attached to the list */
-    cfs_spin_lock(&(cfs_win_task_manger.Lock));
+       spin_lock(&(cfs_win_task_manger.Lock));
 
     while (!IsListEmpty(&(cfs_win_task_manger.TaskList))) {
 
@@ -296,7 +296,7 @@ cleanup_task_manager()
         cleanup_task_slot(TaskSlot);
     }
 
-    cfs_spin_unlock(&cfs_win_task_manger.Lock);
+       spin_unlock(&cfs_win_task_manger.Lock);
 
     /* destroy the taskslot cache slab */
     cfs_mem_cache_destroy(cfs_win_task_manger.slab);
@@ -319,7 +319,7 @@ cfs_current()
     PLIST_ENTRY ListEntry = NULL; 
     PTASK_SLOT  TaskSlot  = NULL;
 
-    cfs_spin_lock(&(cfs_win_task_manger.Lock));
+       spin_lock(&(cfs_win_task_manger.Lock));
 
     ListEntry = cfs_win_task_manger.TaskList.Flink;
     while (ListEntry != (&(cfs_win_task_manger.TaskList))) {
@@ -415,7 +415,7 @@ cfs_current()
 
 errorout:
 
-    cfs_spin_unlock(&(cfs_win_task_manger.Lock));
+       spin_unlock(&(cfs_win_task_manger.Lock));
 
     if (!TaskSlot) {
         cfs_enter_debugger();
index 0d79b53..6d25d1b 100644 (file)
@@ -286,16 +286,15 @@ int FASTCALL cfs_atomic_sub_return(int i, cfs_atomic_t *v)
        return cfs_atomic_add_return(-i, v);
 }
 
-int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, cfs_spinlock_t *lock)
+int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, spinlock_t *lock)
 {
-    if (cfs_atomic_read(v) != 1) {
-        return 0;
-    }
+       if (cfs_atomic_read(v) != 1)
+               return 0;
 
-       cfs_spin_lock(lock);
+       spin_lock(lock);
        if (cfs_atomic_dec_and_test(v))
                return 1;
-       cfs_spin_unlock(lock);
+       spin_unlock(lock);
        return 0;
 }
 
@@ -306,19 +305,19 @@ int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, cfs_spinlock_t *lock)
 
 
 void
-cfs_rwlock_init(cfs_rwlock_t * rwlock)
+rwlock_init(rwlock_t *rwlock)
 {
-    cfs_spin_lock_init(&rwlock->guard);
-    rwlock->count = 0;
+       spin_lock_init(&rwlock->guard);
+       rwlock->count = 0;
 }
 
 void
-cfs_rwlock_fini(cfs_rwlock_t * rwlock)
+cfs_rwlock_fini(rwlock_t *rwlock)
 {
 }
 
 void
-cfs_read_lock(cfs_rwlock_t * rwlock)
+read_lock(rwlock_t *rwlock)
 {
     cfs_task_t * task = cfs_current();
     PTASK_SLOT   slot = NULL;
@@ -334,19 +333,19 @@ cfs_read_lock(cfs_rwlock_t * rwlock)
    
     slot->irql = KeRaiseIrqlToDpcLevel();
 
-    while (TRUE) {
-           cfs_spin_lock(&rwlock->guard);
-        if (rwlock->count >= 0)
-            break;
-        cfs_spin_unlock(&rwlock->guard);
-    }
+       while (TRUE) {
+               spin_lock(&rwlock->guard);
+               if (rwlock->count >= 0)
+                       break;
+               spin_unlock(&rwlock->guard);
+       }
 
        rwlock->count++;
-       cfs_spin_unlock(&rwlock->guard);
+       spin_unlock(&rwlock->guard);
 }
 
 void
-cfs_read_unlock(cfs_rwlock_t * rwlock)
+read_unlock(rwlock_t *rwlock)
 {
     cfs_task_t * task = cfs_current();
     PTASK_SLOT   slot = NULL;
@@ -359,20 +358,19 @@ cfs_read_unlock(cfs_rwlock_t * rwlock)
 
     slot = CONTAINING_RECORD(task, TASK_SLOT, task);
     ASSERT(slot->Magic == TASKSLT_MAGIC);
-   
-    cfs_spin_lock(&rwlock->guard);
+
+       spin_lock(&rwlock->guard);
        ASSERT(rwlock->count > 0);
-    rwlock->count--;
-    if (rwlock < 0) {
-        cfs_enter_debugger();
-    }
-       cfs_spin_unlock(&rwlock->guard);
+       rwlock->count--;
+       if (rwlock->count < 0)
+               cfs_enter_debugger();
+       spin_unlock(&rwlock->guard);
 
-    KeLowerIrql(slot->irql);
+       KeLowerIrql(slot->irql);
 }
 
 void
-cfs_write_lock(cfs_rwlock_t * rwlock)
+write_lock(rwlock_t *rwlock)
 {
     cfs_task_t * task = cfs_current();
     PTASK_SLOT   slot = NULL;
@@ -388,19 +386,19 @@ cfs_write_lock(cfs_rwlock_t * rwlock)
    
     slot->irql = KeRaiseIrqlToDpcLevel();
 
-    while (TRUE) {
-           cfs_spin_lock(&rwlock->guard);
-        if (rwlock->count == 0)
-            break;
-        cfs_spin_unlock(&rwlock->guard);
-    }
+       while (TRUE) {
+               spin_lock(&rwlock->guard);
+               if (rwlock->count == 0)
+                       break;
+               spin_unlock(&rwlock->guard);
+       }
 
        rwlock->count = -1;
-       cfs_spin_unlock(&rwlock->guard);
+       spin_unlock(&rwlock->guard);
 }
 
 void
-cfs_write_unlock(cfs_rwlock_t * rwlock)
+write_unlock(rwlock_t *rwlock)
 {
     cfs_task_t * task = cfs_current();
     PTASK_SLOT   slot = NULL;
@@ -413,11 +411,11 @@ cfs_write_unlock(cfs_rwlock_t * rwlock)
 
     slot = CONTAINING_RECORD(task, TASK_SLOT, task);
     ASSERT(slot->Magic == TASKSLT_MAGIC);
-   
-    cfs_spin_lock(&rwlock->guard);
+
+       spin_lock(&rwlock->guard);
        ASSERT(rwlock->count == -1);
-    rwlock->count = 0;
-       cfs_spin_unlock(&rwlock->guard);
+       rwlock->count = 0;
+       spin_unlock(&rwlock->guard);
 
-    KeLowerIrql(slot->irql);
+       KeLowerIrql(slot->irql);
 }
index 0792233..b64458f 100644 (file)
@@ -54,7 +54,7 @@ cfs_page_t * virt_to_page(void * addr)
     pg->addr = (void *)((__u64)addr & (~((__u64)PAGE_SIZE-1)));
     pg->mapping = addr;
     cfs_atomic_set(&pg->count, 1);
-    cfs_set_bit(PG_virt, &(pg->flags));
+       set_bit(PG_virt, &(pg->flags));
     cfs_enter_debugger();
     return pg;
 }
@@ -123,7 +123,7 @@ void cfs_free_page(cfs_page_t *pg)
     ASSERT(pg->addr  != NULL);
     ASSERT(cfs_atomic_read(&pg->count) <= 1);
 
-    if (!cfs_test_bit(PG_virt, &pg->flags)) {
+       if (!test_bit(PG_virt, &pg->flags)) {
         cfs_mem_cache_free(cfs_page_p_slab, pg->addr);
         cfs_atomic_dec(&libcfs_total_pages);
     } else {
@@ -374,7 +374,7 @@ void cfs_mem_cache_free(cfs_mem_cache_t * kmc, void * buf)
     ExFreeToNPagedLookasideList(&(kmc->npll), buf);
 }
 
-cfs_spinlock_t  shrinker_guard = {0};
+spinlock_t  shrinker_guard = {0};
 CFS_LIST_HEAD(shrinker_hdr);
 cfs_timer_t shrinker_timer = {0};
 
@@ -382,22 +382,22 @@ struct cfs_shrinker * cfs_set_shrinker(int seeks, shrink_callback cb)
 {
     struct cfs_shrinker * s = (struct cfs_shrinker *)
         cfs_alloc(sizeof(struct cfs_shrinker), CFS_ALLOC_ZERO);
-    if (s) {
-        s->cb = cb;
-        s->seeks = seeks;
-        s->nr = 2;
-        cfs_spin_lock(&shrinker_guard);
-        cfs_list_add(&s->list, &shrinker_hdr); 
-        cfs_spin_unlock(&shrinker_guard);
-    }
-
-    return s;
+       if (s) {
+               s->cb = cb;
+               s->seeks = seeks;
+               s->nr = 2;
+               spin_lock(&shrinker_guard);
+               cfs_list_add(&s->list, &shrinker_hdr);
+               spin_unlock(&shrinker_guard);
+       }
+
+       return s;
 }
 
 void cfs_remove_shrinker(struct cfs_shrinker *s)
 {
-    struct cfs_shrinker *tmp;
-    cfs_spin_lock(&shrinker_guard);
+       struct cfs_shrinker *tmp;
+       spin_lock(&shrinker_guard);
 #if TRUE
     cfs_list_for_each_entry_typed(tmp, &shrinker_hdr,
                                   struct cfs_shrinker, list) {
@@ -409,22 +409,22 @@ void cfs_remove_shrinker(struct cfs_shrinker *s)
 #else
     cfs_list_del(&s->list);
 #endif
-    cfs_spin_unlock(&shrinker_guard);
-    cfs_free(s);
+       spin_unlock(&shrinker_guard);
+       cfs_free(s);
 }
 
 /* time ut test proc */
 void shrinker_timer_proc(ulong_ptr_t arg)
 {
-    struct cfs_shrinker *s;
-    cfs_spin_lock(&shrinker_guard);
-
-    cfs_list_for_each_entry_typed(s, &shrinker_hdr,
-                                  struct cfs_shrinker, list) {
-            s->cb(s->nr, __GFP_FS);
-    }
-    cfs_spin_unlock(&shrinker_guard);
-    cfs_timer_arm(&shrinker_timer, 300);
+       struct cfs_shrinker *s;
+       spin_lock(&shrinker_guard);
+
+       cfs_list_for_each_entry_typed(s, &shrinker_hdr,
+                                     struct cfs_shrinker, list) {
+               s->cb(s->nr, __GFP_FS);
+       }
+       spin_unlock(&shrinker_guard);
+       cfs_timer_arm(&shrinker_timer, 300);
 }
 
 int start_shrinker_timer()
index 330b3b6..faf5f52 100644 (file)
@@ -148,7 +148,7 @@ int cfs_create_thread(int (*func)(void *), void *arg, unsigned long flag)
  */
 
 
-static CFS_DECLARE_RWSEM(cfs_symbol_lock);
+static DECLARE_RWSEM(cfs_symbol_lock);
 CFS_LIST_HEAD(cfs_symbol_list);
 
 int libcfs_is_mp_system = FALSE;
@@ -174,7 +174,7 @@ cfs_symbol_get(const char *name)
     cfs_list_t              *walker;
     struct cfs_symbol       *sym = NULL;
 
-    cfs_down_read(&cfs_symbol_lock);
+       down_read(&cfs_symbol_lock);
     cfs_list_for_each(walker, &cfs_symbol_list) {
         sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
         if (!strcmp(sym->name, name)) {
@@ -182,7 +182,7 @@ cfs_symbol_get(const char *name)
             break;
         }
     }
-    cfs_up_read(&cfs_symbol_lock);
+       up_read(&cfs_symbol_lock);
 
     if (sym != NULL)
         return sym->value;
@@ -210,7 +210,7 @@ cfs_symbol_put(const char *name)
     cfs_list_t              *walker;
     struct cfs_symbol       *sym = NULL;
 
-    cfs_down_read(&cfs_symbol_lock);
+       down_read(&cfs_symbol_lock);
     cfs_list_for_each(walker, &cfs_symbol_list) {
         sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
         if (!strcmp(sym->name, name)) {
@@ -219,7 +219,7 @@ cfs_symbol_put(const char *name)
             break;
         }
     }
-    cfs_up_read(&cfs_symbol_lock);
+       up_read(&cfs_symbol_lock);
 
     LASSERT(sym != NULL);
 }
@@ -257,17 +257,17 @@ cfs_symbol_register(const char *name, const void *value)
     new->ref = 0;
     CFS_INIT_LIST_HEAD(&new->sym_list);
 
-    cfs_down_write(&cfs_symbol_lock);
-    cfs_list_for_each(walker, &cfs_symbol_list) {
-        sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
-        if (!strcmp(sym->name, name)) {
-            cfs_up_write(&cfs_symbol_lock);
-            cfs_free(new);
-            return 0; // alreay registerred
-        }
-    }
-    cfs_list_add_tail(&new->sym_list, &cfs_symbol_list);
-    cfs_up_write(&cfs_symbol_lock);
+       down_write(&cfs_symbol_lock);
+       cfs_list_for_each(walker, &cfs_symbol_list) {
+               sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
+               if (!strcmp(sym->name, name)) {
+                       up_write(&cfs_symbol_lock);
+                       cfs_free(new);
+                       return 0; /* already registered */
+               }
+       }
+       cfs_list_add_tail(&new->sym_list, &cfs_symbol_list);
+       up_write(&cfs_symbol_lock);
 
     return 0;
 }
@@ -293,7 +293,7 @@ cfs_symbol_unregister(const char *name)
     cfs_list_t              *nxt;
     struct cfs_symbol       *sym = NULL;
 
-    cfs_down_write(&cfs_symbol_lock);
+       down_write(&cfs_symbol_lock);
     cfs_list_for_each_safe(walker, nxt, &cfs_symbol_list) {
         sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
         if (!strcmp(sym->name, name)) {
@@ -303,7 +303,7 @@ cfs_symbol_unregister(const char *name)
             break;
         }
     }
-    cfs_up_write(&cfs_symbol_lock);
+       up_write(&cfs_symbol_lock);
 }
 
 /*
@@ -326,15 +326,15 @@ cfs_symbol_clean()
     cfs_list_t          *walker;
     struct cfs_symbol   *sym = NULL;
 
-    cfs_down_write(&cfs_symbol_lock);
-    cfs_list_for_each(walker, &cfs_symbol_list) {
-        sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
-        LASSERT(sym->ref == 0);
-        cfs_list_del (&sym->sym_list);
-        cfs_free(sym);
-    }
-    cfs_up_write(&cfs_symbol_lock);
-    return;
+       down_write(&cfs_symbol_lock);
+       cfs_list_for_each(walker, &cfs_symbol_list) {
+               sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
+               LASSERT(sym->ref == 0);
+               cfs_list_del (&sym->sym_list);
+               cfs_free(sym);
+       }
+       up_write(&cfs_symbol_lock);
+       return;
 }
 
 
@@ -761,16 +761,16 @@ void cfs_libc_init();
 int
 libcfs_arch_init(void)
 {
-    int         rc;
+       int             rc;
+       spinlock_t      lock;
 
-    cfs_spinlock_t  lock;
-    /* Workground to check the system is MP build or UP build */
-    cfs_spin_lock_init(&lock);
-    cfs_spin_lock(&lock);
-    libcfs_is_mp_system = (int)lock.lock;
-    /* MP build system: it's a real spin, for UP build system, it
-       only raises the IRQL to DISPATCH_LEVEL */
-    cfs_spin_unlock(&lock);
+       /* Workaround to check the system is MP build or UP build */
+       spin_lock_init(&lock);
+       spin_lock(&lock);
+       libcfs_is_mp_system = (int)lock.lock;
+       /* MP build system: it's a real spin, for UP build system, it
+        * only raises the IRQL to DISPATCH_LEVEL */
+       spin_unlock(&lock);
 
     /* initialize libc routines (confliction between libcnptr.lib
        and kernel ntoskrnl.lib) */
index 6f46d09..e86413c 100644 (file)
@@ -70,15 +70,15 @@ cfs_sysctl_table_header_t       root_table_header;
 /* The global lock to protect all the access */
 
 #if LIBCFS_PROCFS_SPINLOCK
-cfs_spinlock_t                  proc_fs_lock;
+spinlock_t                     proc_fs_lock;
 
-#define INIT_PROCFS_LOCK()      cfs_spin_lock_init(&proc_fs_lock)
-#define LOCK_PROCFS()           cfs_spin_lock(&proc_fs_lock)
-#define UNLOCK_PROCFS()         cfs_spin_unlock(&proc_fs_lock)
+#define INIT_PROCFS_LOCK()     spin_lock_init(&proc_fs_lock)
+#define LOCK_PROCFS()          spin_lock(&proc_fs_lock)
+#define UNLOCK_PROCFS()                spin_unlock(&proc_fs_lock)
 
 #else
 
-cfs_mutex_t                     proc_fs_lock;
+struct mutex                           proc_fs_lock;
 
 #define INIT_PROCFS_LOCK()      cfs_init_mutex(&proc_fs_lock)
 #define LOCK_PROCFS()           cfs_mutex_down(&proc_fs_lock)
@@ -1836,7 +1836,7 @@ int seq_open(struct file *file, const struct seq_operations *op)
                file->private_data = p;
        }
        memset(p, 0, sizeof(*p));
-       cfs_mutex_init(&p->lock);
+       mutex_init(&p->lock);
        p->op = op;
 
        /*
@@ -1870,7 +1870,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
        void *p;
        int err = 0;
 
-       cfs_mutex_lock(&m->lock);
+       mutex_lock(&m->lock);
        /*
         * seq_file->op->..m_start/m_stop/m_next may do special actions
         * or optimisations based on the file->f_version, so we want to
@@ -1963,7 +1963,7 @@ Done:
        else
                *ppos += copied;
        file->f_version = m->version;
-       cfs_mutex_unlock(&m->lock);
+       mutex_unlock(&m->lock);
        return copied;
 Enomem:
        err = -ENOMEM;
@@ -2040,7 +2040,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int origin)
        struct seq_file *m = (struct seq_file *)file->private_data;
        long long retval = -EINVAL;
 
-       cfs_mutex_lock(&m->lock);
+       mutex_lock(&m->lock);
        m->version = file->f_version;
        switch (origin) {
                case 1:
@@ -2064,7 +2064,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int origin)
                        }
        }
        file->f_version = m->version;
-       cfs_mutex_unlock(&m->lock);
+       mutex_unlock(&m->lock);
        return retval;
 }
 EXPORT_SYMBOL(seq_lseek);
index fc62f39..dc4a301 100644 (file)
@@ -60,7 +60,7 @@ void cfs_waitq_init(cfs_waitq_t *waitq)
     waitq->magic = CFS_WAITQ_MAGIC;
     waitq->flags = 0;
     CFS_INIT_LIST_HEAD(&(waitq->waiters));
-    cfs_spin_lock_init(&(waitq->guard));
+       spin_lock_init(&(waitq->guard));
 }
 
 /*
@@ -169,7 +169,7 @@ void cfs_waitq_add_internal(cfs_waitq_t *waitq,
     LASSERT(link->magic == CFS_WAITLINK_MAGIC);
     LASSERT(waitqid < CFS_WAITQ_CHANNELS);
 
-    cfs_spin_lock(&(waitq->guard));
+       spin_lock(&(waitq->guard));
     LASSERT(link->waitq[waitqid].waitq == NULL);
     link->waitq[waitqid].waitq = waitq;
     if (link->flags & CFS_WAITQ_EXCLUSIVE) {
@@ -177,7 +177,7 @@ void cfs_waitq_add_internal(cfs_waitq_t *waitq,
     } else {
         cfs_list_add(&link->waitq[waitqid].link, &waitq->waiters);
     }
-    cfs_spin_unlock(&(waitq->guard));
+       spin_unlock(&(waitq->guard));
 }
 /*
  * cfs_waitq_add
@@ -254,7 +254,7 @@ void cfs_waitq_del( cfs_waitq_t *waitq,
     LASSERT(waitq->magic == CFS_WAITQ_MAGIC);
     LASSERT(link->magic == CFS_WAITLINK_MAGIC);
 
-    cfs_spin_lock(&(waitq->guard));
+       spin_lock(&(waitq->guard));
 
     for (i=0; i < CFS_WAITQ_CHANNELS; i++) {
         if (link->waitq[i].waitq == waitq)
@@ -268,7 +268,7 @@ void cfs_waitq_del( cfs_waitq_t *waitq,
         cfs_enter_debugger();
     }
 
-    cfs_spin_unlock(&(waitq->guard));
+       spin_unlock(&(waitq->guard));
 }
 
 /*
@@ -319,7 +319,7 @@ void cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
     LASSERT(waitq != NULL);
     LASSERT(waitq->magic == CFS_WAITQ_MAGIC);
 
-    cfs_spin_lock(&waitq->guard);
+       spin_lock(&waitq->guard);
     cfs_list_for_each_entry_typed(scan, &waitq->waiters, 
                             cfs_waitlink_channel_t,
                             link) {
@@ -337,8 +337,8 @@ void cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
             break;
     }
 
-    cfs_spin_unlock(&waitq->guard);
-    return;
+       spin_unlock(&waitq->guard);
+       return;
 }
 
 /*
index c784f6a..293b630 100644 (file)
@@ -346,7 +346,7 @@ KsAllocateKsTsdu()
 {
     PKS_TSDU    KsTsdu = NULL;
 
-    cfs_spin_lock(&(ks_data.ksnd_tsdu_lock));
+       spin_lock(&(ks_data.ksnd_tsdu_lock));
 
     if (!cfs_list_empty (&(ks_data.ksnd_freetsdus))) {
 
@@ -362,7 +362,7 @@ KsAllocateKsTsdu()
                         ks_data.ksnd_tsdu_slab, 0);
     }
 
-    cfs_spin_unlock(&(ks_data.ksnd_tsdu_lock));
+       spin_unlock(&(ks_data.ksnd_tsdu_lock));
 
     if (NULL != KsTsdu) {
         RtlZeroMemory(KsTsdu, ks_data.ksnd_tsdu_size);
@@ -415,14 +415,14 @@ KsPutKsTsdu(
     PKS_TSDU  KsTsdu
     )
 {
-    cfs_spin_lock(&(ks_data.ksnd_tsdu_lock));
-    if (ks_data.ksnd_nfreetsdus > 128) {
-        KsFreeKsTsdu(KsTsdu);
-    } else {
-        cfs_list_add_tail( &(KsTsdu->Link), &(ks_data.ksnd_freetsdus));
-        ks_data.ksnd_nfreetsdus++;
-    }
-    cfs_spin_unlock(&(ks_data.ksnd_tsdu_lock));
+       spin_lock(&(ks_data.ksnd_tsdu_lock));
+       if (ks_data.ksnd_nfreetsdus > 128) {
+               KsFreeKsTsdu(KsTsdu);
+       } else {
+               cfs_list_add_tail( &(KsTsdu->Link), &(ks_data.ksnd_freetsdus));
+               ks_data.ksnd_nfreetsdus++;
+       }
+       spin_unlock(&(ks_data.ksnd_tsdu_lock));
 }
 
 /* with tconn lock acquired */
@@ -1282,7 +1282,7 @@ KsInitializeKsTsduMgr(
     TsduMgr->NumOfTsdu  = 0;
     TsduMgr->TotalBytes = 0;
 
-    cfs_spin_lock_init(&TsduMgr->Lock);
+       spin_lock_init(&TsduMgr->Lock);
 }
 
 
@@ -2953,7 +2953,7 @@ KsAcceptCompletionRoutine(
 
     LASSERT(child->kstc_type == kstt_child);
 
-    cfs_spin_lock(&(child->kstc_lock));
+       spin_lock(&(child->kstc_lock));
 
     LASSERT(parent->kstc_state == ksts_listening);
     LASSERT(child->kstc_state == ksts_connecting);
@@ -2971,7 +2971,7 @@ KsAcceptCompletionRoutine(
             FALSE
             );
 
-        cfs_spin_unlock(&(child->kstc_lock));
+       spin_unlock(&(child->kstc_lock));
 
         KsPrint((2, "KsAcceptCompletionRoutine: singal parent: %p (child: %p)\n",
                     parent, child));
@@ -2983,7 +2983,7 @@ KsAcceptCompletionRoutine(
         child->child.kstc_busy = FALSE;
         child->kstc_state = ksts_associated;
 
-        cfs_spin_unlock(&(child->kstc_lock));
+       spin_unlock(&(child->kstc_lock));
     }
 
     /* now free the Irp */
@@ -3001,7 +3001,7 @@ KsSearchIpAddress(PUNICODE_STRING  DeviceName)
     ks_addr_slot_t * slot = NULL;
     PLIST_ENTRY      list = NULL;
 
-    cfs_spin_lock(&ks_data.ksnd_addrs_lock);
+       spin_lock(&ks_data.ksnd_addrs_lock);
 
     list = ks_data.ksnd_addrs_list.Flink;
     while (list != &ks_data.ksnd_addrs_list) {
@@ -3016,15 +3016,15 @@ KsSearchIpAddress(PUNICODE_STRING  DeviceName)
         slot = NULL;
     }
 
-    cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
+       spin_unlock(&ks_data.ksnd_addrs_lock);
 
-    return slot;
+       return slot;
 }
 
 void
 KsCleanupIpAddresses()
 {
-    cfs_spin_lock(&ks_data.ksnd_addrs_lock);
+       spin_lock(&ks_data.ksnd_addrs_lock);
 
     while (!IsListEmpty(&ks_data.ksnd_addrs_list)) {
 
@@ -3038,7 +3038,7 @@ KsCleanupIpAddresses()
     }
 
     cfs_assert(ks_data.ksnd_naddrs == 0);
-    cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
+       spin_unlock(&ks_data.ksnd_addrs_lock);
 }
 
 VOID
@@ -3081,7 +3081,7 @@ KsAddAddressHandler(
 
             slot = cfs_alloc(sizeof(ks_addr_slot_t) + DeviceName->Length, CFS_ALLOC_ZERO);
             if (slot != NULL) {
-                cfs_spin_lock(&ks_data.ksnd_addrs_lock);
+               spin_lock(&ks_data.ksnd_addrs_lock);
                 InsertTailList(&ks_data.ksnd_addrs_list, &slot->link);
                 sprintf(slot->iface, "eth%d", ks_data.ksnd_naddrs++);
                 slot->ip_addr = ntohl(IpAddress->in_addr);
@@ -3091,7 +3091,7 @@ KsAddAddressHandler(
                 slot->devname.Length = DeviceName->Length;
                 slot->devname.MaximumLength = DeviceName->Length + sizeof(WCHAR);
                 slot->devname.Buffer = slot->buffer;
-                cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
+               spin_unlock(&ks_data.ksnd_addrs_lock);
 
                 KsPrint((0, "KsAddAddressHandle: %s added: ip=%xh(%d.%d.%d.%d)\n",
                             slot->iface, IpAddress->in_addr,
@@ -3142,7 +3142,7 @@ KsRegisterPnpHandlers()
 
     /* initialize the global ks_data members */
     RtlInitUnicodeString(&ks_data.ksnd_client_name, TDILND_MODULE_NAME);
-    cfs_spin_lock_init(&ks_data.ksnd_addrs_lock);
+       spin_lock_init(&ks_data.ksnd_addrs_lock);
     InitializeListHead(&ks_data.ksnd_addrs_list);
 
     /* register the pnp handlers */
@@ -3209,15 +3209,15 @@ KsGetVacancyBacklog(
 
         cfs_list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
             child = cfs_list_entry (tmp, ks_tconn_t, child.kstc_link);
-            cfs_spin_lock(&(child->kstc_lock));
+           spin_lock(&(child->kstc_lock));
 
             if (!child->child.kstc_busy) {
                 LASSERT(child->kstc_state == ksts_associated);
                 child->child.kstc_busy = TRUE;
-                cfs_spin_unlock(&(child->kstc_lock));
+               spin_unlock(&(child->kstc_lock));
                 break;
             } else {
-                cfs_spin_unlock(&(child->kstc_lock));
+               spin_unlock(&(child->kstc_lock));
                 child = NULL;
             }
         }
@@ -3273,7 +3273,7 @@ KsConnectEventHandler(
 
     LASSERT(parent->kstc_type == kstt_listener);
 
-    cfs_spin_lock(&(parent->kstc_lock));
+       spin_lock(&(parent->kstc_lock));
 
     if (parent->kstc_state == ksts_listening) {
 
@@ -3311,11 +3311,11 @@ KsConnectEventHandler(
 
         if (child) {
 
-            cfs_spin_lock(&(child->kstc_lock));
+           spin_lock(&(child->kstc_lock));
             child->child.kstc_info.ConnectionInfo = ConnectionInfo;
             child->child.kstc_info.Remote = ConnectionInfo->RemoteAddress;
             child->kstc_state = ksts_connecting;
-            cfs_spin_unlock(&(child->kstc_lock));
+           spin_unlock(&(child->kstc_lock));
 
         } else {
 
@@ -3354,13 +3354,13 @@ KsConnectEventHandler(
         goto errorout;
     }
 
-    cfs_spin_unlock(&(parent->kstc_lock));
+       spin_unlock(&(parent->kstc_lock));
 
     return Status;
 
 errorout:
 
-    cfs_spin_unlock(&(parent->kstc_lock));
+       spin_unlock(&(parent->kstc_lock));
 
     *AcceptIrp = NULL;
     *ConnectionContext = NULL;
@@ -3436,10 +3436,10 @@ KsDisconnectHelper(PKS_DISCONNECT_WORKITEM WorkItem)
 
     KeSetEvent(&(WorkItem->Event), 0, FALSE);
 
-    cfs_spin_lock(&(tconn->kstc_lock));
-    cfs_clear_flag(tconn->kstc_flags, KS_TCONN_DISCONNECT_BUSY);
-    cfs_spin_unlock(&(tconn->kstc_lock));
-    ks_put_tconn(tconn);
+       spin_lock(&(tconn->kstc_lock));
+       cfs_clear_flag(tconn->kstc_flags, KS_TCONN_DISCONNECT_BUSY);
+       spin_unlock(&(tconn->kstc_lock));
+       ks_put_tconn(tconn);
 }
 
 
@@ -3485,7 +3485,7 @@ KsDisconnectEventHandler(
                  tconn, DisconnectFlags));
 
     ks_get_tconn(tconn);
-    cfs_spin_lock(&(tconn->kstc_lock));
+       spin_lock(&(tconn->kstc_lock));
 
     WorkItem = &(tconn->kstc_disconnect);
 
@@ -3518,7 +3518,7 @@ KsDisconnectEventHandler(
         }
     }
 
-    cfs_spin_unlock(&(tconn->kstc_lock));
+       spin_unlock(&(tconn->kstc_lock));
     ks_put_tconn(tconn);
 
     return  (Status);
@@ -4331,16 +4331,16 @@ ks_create_tconn()
                 tconn
             );
 
-        cfs_spin_lock_init(&(tconn->kstc_lock));
+       spin_lock_init(&(tconn->kstc_lock));
 
         ks_get_tconn(tconn);
-        cfs_spin_lock(&(ks_data.ksnd_tconn_lock));
+       spin_lock(&(ks_data.ksnd_tconn_lock));
 
         /* attach it into global list in ks_data */
 
         cfs_list_add(&(tconn->kstc_list), &(ks_data.ksnd_tconns));
         ks_data.ksnd_ntconns++;
-        cfs_spin_unlock(&(ks_data.ksnd_tconn_lock));
+       spin_unlock(&(ks_data.ksnd_tconn_lock));
 
         tconn->kstc_rcv_wnd = tconn->kstc_snd_wnd = 0x10000;
     }
@@ -4368,7 +4368,7 @@ ks_free_tconn(ks_tconn_t * tconn)
 {
     LASSERT(cfs_atomic_read(&(tconn->kstc_refcount)) == 0);
 
-    cfs_spin_lock(&(ks_data.ksnd_tconn_lock));
+       spin_lock(&(ks_data.ksnd_tconn_lock));
 
     /* remove it from the global list */
     cfs_list_del(&tconn->kstc_list);
@@ -4379,7 +4379,7 @@ ks_free_tconn(ks_tconn_t * tconn)
     if (ks_data.ksnd_ntconns == 0) {
         cfs_wake_event(&ks_data.ksnd_tconn_exit);
     }
-    cfs_spin_unlock(&(ks_data.ksnd_tconn_lock));
+       spin_unlock(&(ks_data.ksnd_tconn_lock));
 
     /* free the structure memory */
     cfs_mem_cache_free(ks_data.ksnd_tconn_slab, tconn);
@@ -4534,13 +4534,13 @@ ks_put_tconn(
 {
     if (cfs_atomic_dec_and_test(&(tconn->kstc_refcount))) {
 
-        cfs_spin_lock(&(tconn->kstc_lock));
+       spin_lock(&(tconn->kstc_lock));
 
         if ( ( tconn->kstc_type == kstt_child ||
                tconn->kstc_type == kstt_sender ) &&
              ( tconn->kstc_state == ksts_connected ) ) {
 
-            cfs_spin_unlock(&(tconn->kstc_lock));
+           spin_unlock(&(tconn->kstc_lock));
 
             ks_abort_tconn(tconn);
 
@@ -4557,7 +4557,7 @@ ks_put_tconn(
                 cfs_set_flag(tconn->kstc_flags, KS_TCONN_DESTROY_BUSY);
             }
 
-            cfs_spin_unlock(&(tconn->kstc_lock));
+           spin_unlock(&(tconn->kstc_lock));
         }
     }
 }
@@ -4621,8 +4621,8 @@ ks_destroy_tconn(
                 tconn->kstc_addr.FileObject
                 );
 
-        cfs_spin_lock(&tconn->child.kstc_parent->kstc_lock);
-        cfs_spin_lock(&tconn->kstc_lock);
+       spin_lock(&tconn->child.kstc_parent->kstc_lock);
+       spin_lock(&tconn->kstc_lock);
 
         tconn->kstc_state = ksts_inited;
 
@@ -4646,8 +4646,8 @@ ks_destroy_tconn(
             tconn->child.kstc_queued = FALSE;
         }
 
-        cfs_spin_unlock(&tconn->kstc_lock);
-        cfs_spin_unlock(&tconn->child.kstc_parent->kstc_lock);
+       spin_unlock(&tconn->kstc_lock);
+       spin_unlock(&tconn->child.kstc_parent->kstc_lock);
 
         /* drop the reference of the parent tconn */
         ks_put_tconn(tconn->child.kstc_parent);
@@ -5222,7 +5222,7 @@ ks_build_tconn(
                     NULL
                     );
 
-    cfs_spin_lock(&(tconn->kstc_lock));
+       spin_lock(&(tconn->kstc_lock));
 
     if (NT_SUCCESS(status)) {
 
@@ -5233,7 +5233,7 @@ ks_build_tconn(
         tconn->sender.kstc_info.ConnectionInfo = ConnectionInfo;
         tconn->sender.kstc_info.Remote         = ConnectionInfo->RemoteAddress;
 
-        cfs_spin_unlock(&(tconn->kstc_lock));
+       spin_unlock(&(tconn->kstc_lock));
 
     } else {
 
@@ -5247,7 +5247,7 @@ ks_build_tconn(
         rc = cfs_error_code(status);
 
         tconn->kstc_state = ksts_associated;
-        cfs_spin_unlock(&(tconn->kstc_lock));
+       spin_unlock(&(tconn->kstc_lock));
 
         /* disassocidate the connection and the address object,
            after cleanup,  it's safe to set the state to abort ... */
@@ -5405,7 +5405,7 @@ ks_disconnect_tconn(
             cfs_enter_debugger();
         }
 
-        cfs_spin_lock(&(tconn->kstc_lock));
+       spin_lock(&(tconn->kstc_lock));
 
         /* cleanup the tsdumgr Lists */
         KsCleanupTsdu (tconn);
@@ -5422,7 +5422,7 @@ ks_disconnect_tconn(
         info->ConnectionInfo = NULL;
         info->Remote = NULL;
 
-        cfs_spin_unlock(&(tconn->kstc_lock));
+       spin_unlock(&(tconn->kstc_lock));
     }
 
     status = STATUS_SUCCESS;
@@ -5460,7 +5460,7 @@ ks_abort_tconn(
     WorkItem = &(tconn->kstc_disconnect);
 
     ks_get_tconn(tconn);
-    cfs_spin_lock(&(tconn->kstc_lock));
+       spin_lock(&(tconn->kstc_lock));
 
     if (tconn->kstc_state != ksts_connected) {
         ks_put_tconn(tconn);
@@ -5480,7 +5480,7 @@ ks_abort_tconn(
         }
     }
 
-    cfs_spin_unlock(&(tconn->kstc_lock));
+       spin_unlock(&(tconn->kstc_lock));
 }
 
 
@@ -5556,7 +5556,7 @@ KsQueueTdiEngine(ks_tconn_t * tconn, PKS_TSDUMGR TsduMgr)
     engs = &TsduMgr->Slot;
 
     if (!engs->queued) {
-        cfs_spin_lock(&engm->lock);
+       spin_lock(&engm->lock);
         if (!engs->queued) {
             cfs_list_add_tail(&engs->link, &engm->list);
             engs->queued = TRUE;
@@ -5565,7 +5565,7 @@ KsQueueTdiEngine(ks_tconn_t * tconn, PKS_TSDUMGR TsduMgr)
             engs->tsdumgr = TsduMgr;
             KeSetEvent(&(engm->start),0, FALSE);
         }
-        cfs_spin_unlock(&engm->lock);
+       spin_unlock(&engm->lock);
         KsPrint((4, "KsQueueTdiEngine: TsduMgr=%p is queued to engine %p\n",
                     TsduMgr, engm));
     }
@@ -5582,7 +5582,7 @@ KsRemoveTdiEngine(PKS_TSDUMGR TsduMgr)
     if (engs->queued) {
         engm = engs->emgr;
         LASSERT(engm != NULL);
-        cfs_spin_lock(&engm->lock);
+       spin_lock(&engm->lock);
         if (engs->queued) {
             cfs_list_del(&engs->link);
             engs->queued = FALSE;
@@ -5590,7 +5590,7 @@ KsRemoveTdiEngine(PKS_TSDUMGR TsduMgr)
             engs->emgr = NULL;
             engs->tsdumgr = NULL;
         }
-        cfs_spin_unlock(&engm->lock);
+       spin_unlock(&engm->lock);
         KsPrint((4, "KsQueueTdiEngine: TsduMgr %p is removed from engine %p\n",
                     TsduMgr, engm));
     }
@@ -5807,9 +5807,9 @@ KsDeliveryEngineThread(void * context)
 
         cfs_wait_event_internal(&engm->start, 0);
 
-        cfs_spin_lock(&engm->lock);
+       spin_lock(&engm->lock);
         if (cfs_list_empty(&engm->list)) {
-            cfs_spin_unlock(&engm->lock);
+           spin_unlock(&engm->lock);
             continue;
         }
 
@@ -5820,7 +5820,7 @@ KsDeliveryEngineThread(void * context)
         LASSERT(engs->queued);
         engs->emgr = NULL;
         engs->queued = FALSE;
-        cfs_spin_unlock(&engm->lock);
+       spin_unlock(&engm->lock);
 
         tconn = engs->tconn;
         LASSERT(tconn->kstc_magic == KS_TCONN_MAGIC);
@@ -5859,7 +5859,7 @@ ks_init_tdi_data()
     /* initialize tconn related globals */
     RtlZeroMemory(&ks_data, sizeof(ks_tdi_data_t));
 
-    cfs_spin_lock_init(&ks_data.ksnd_tconn_lock);
+       spin_lock_init(&ks_data.ksnd_tconn_lock);
     CFS_INIT_LIST_HEAD(&ks_data.ksnd_tconns);
     cfs_init_event(&ks_data.ksnd_tconn_exit, TRUE, FALSE);
 
@@ -5872,7 +5872,7 @@ ks_init_tdi_data()
     }
 
     /* initialize tsdu related globals */
-    cfs_spin_lock_init(&ks_data.ksnd_tsdu_lock);
+       spin_lock_init(&ks_data.ksnd_tsdu_lock);
     CFS_INIT_LIST_HEAD(&ks_data.ksnd_freetsdus);
     ks_data.ksnd_tsdu_size = TDINAL_TSDU_DEFAULT_SIZE; /* 64k */
     ks_data.ksnd_tsdu_slab = cfs_mem_cache_create(
@@ -5895,7 +5895,7 @@ ks_init_tdi_data()
         goto errorout;
     }
     for (i = 0; i < ks_data.ksnd_engine_nums; i++) {
-        cfs_spin_lock_init(&ks_data.ksnd_engine_mgr[i].lock);
+               spin_lock_init(&ks_data.ksnd_engine_mgr[i].lock);
         cfs_init_event(&ks_data.ksnd_engine_mgr[i].start, TRUE, FALSE);
         cfs_init_event(&ks_data.ksnd_engine_mgr[i].exit, TRUE, FALSE);
         CFS_INIT_LIST_HEAD(&ks_data.ksnd_engine_mgr[i].list);
@@ -5954,12 +5954,12 @@ ks_fini_tdi_data()
     }
 
     /* we need wait until all the tconn are freed */
-    cfs_spin_lock(&(ks_data.ksnd_tconn_lock));
+       spin_lock(&(ks_data.ksnd_tconn_lock));
 
     if (cfs_list_empty(&(ks_data.ksnd_tconns))) {
         cfs_wake_event(&ks_data.ksnd_tconn_exit);
     }
-    cfs_spin_unlock(&(ks_data.ksnd_tconn_lock));
+       spin_unlock(&(ks_data.ksnd_tconn_lock));
 
     /* now wait on the tconn exit event */
     cfs_wait_event_internal(&ks_data.ksnd_tconn_exit, 0);
@@ -5969,7 +5969,7 @@ ks_fini_tdi_data()
     ks_data.ksnd_tconn_slab = NULL;
 
     /* clean up all the tsud buffers in the free list */
-    cfs_spin_lock(&(ks_data.ksnd_tsdu_lock));
+       spin_lock(&(ks_data.ksnd_tsdu_lock));
     cfs_list_for_each (list, &ks_data.ksnd_freetsdus) {
         KsTsdu = cfs_list_entry (list, KS_TSDU, Link);
 
@@ -5977,7 +5977,7 @@ ks_fini_tdi_data()
                 ks_data.ksnd_tsdu_slab,
                 KsTsdu );
     }
-    cfs_spin_unlock(&(ks_data.ksnd_tsdu_lock));
+       spin_unlock(&(ks_data.ksnd_tsdu_lock));
 
     /* it's safe to delete the tsdu slab ... */
     cfs_mem_cache_destroy(ks_data.ksnd_tsdu_slab);
@@ -6101,22 +6101,22 @@ ks_replenish_backlogs(
         /* create the backlog child tconn */
         backlog = ks_create_child_tconn(parent);
 
-        cfs_spin_lock(&(parent->kstc_lock));
+       spin_lock(&(parent->kstc_lock));
 
         if (backlog) {
-            cfs_spin_lock(&backlog->kstc_lock);
+           spin_lock(&backlog->kstc_lock);
             /* attch it into the listing list of daemon */
             cfs_list_add( &backlog->child.kstc_link,
                       &parent->listener.kstc_listening.list );
             parent->listener.kstc_listening.num++;
 
             backlog->child.kstc_queued = TRUE;
-            cfs_spin_unlock(&backlog->kstc_lock);
+           spin_unlock(&backlog->kstc_lock);
         } else {
             cfs_enter_debugger();
         }
 
-        cfs_spin_unlock(&(parent->kstc_lock));
+       spin_unlock(&(parent->kstc_lock));
     }
 }
 
@@ -6151,13 +6151,13 @@ ks_start_listen(ks_tconn_t *tconn, int nbacklog)
         return rc;
     }
 
-    cfs_spin_lock(&(tconn->kstc_lock));
-    tconn->listener.nbacklog = nbacklog;
-    tconn->kstc_state = ksts_listening;
-    cfs_set_flag(tconn->kstc_flags, KS_TCONN_DAEMON_STARTED);
-    cfs_spin_unlock(&(tconn->kstc_lock));
+       spin_lock(&(tconn->kstc_lock));
+       tconn->listener.nbacklog = nbacklog;
+       tconn->kstc_state = ksts_listening;
+       cfs_set_flag(tconn->kstc_flags, KS_TCONN_DAEMON_STARTED);
+       spin_unlock(&(tconn->kstc_lock));
 
-    return rc;
+       return rc;
 }
 
 void
@@ -6169,7 +6169,7 @@ ks_stop_listen(ks_tconn_t *tconn)
     /* reset all tdi event callbacks to NULL */
     KsResetHandlers (tconn);
 
-    cfs_spin_lock(&tconn->kstc_lock);
+       spin_lock(&tconn->kstc_lock);
 
     cfs_clear_flag(tconn->kstc_flags, KS_TCONN_DAEMON_STARTED);
 
@@ -6181,7 +6181,7 @@ ks_stop_listen(ks_tconn_t *tconn)
         ks_put_tconn(backlog);
     }
 
-    cfs_spin_unlock(&tconn->kstc_lock);
+       spin_unlock(&tconn->kstc_lock);
 
     /* wake up it from the waiting on new incoming connections */
     KeSetEvent(&tconn->listener.kstc_accept_event, 0, FALSE);
@@ -6217,10 +6217,10 @@ ks_wait_child_tconn(
 
     ks_replenish_backlogs(parent, parent->listener.nbacklog);
 
-    cfs_spin_lock(&(parent->kstc_lock));
+       spin_lock(&(parent->kstc_lock));
 
-    if (parent->listener.kstc_listening.num <= 0) {
-        cfs_spin_unlock(&(parent->kstc_lock));
+       if (parent->listener.kstc_listening.num <= 0) {
+               spin_unlock(&(parent->kstc_lock));
         return -1;
     }
 
@@ -6231,7 +6231,7 @@ again:
     cfs_list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
         backlog = cfs_list_entry (tmp, ks_tconn_t, child.kstc_link);
 
-        cfs_spin_lock(&(backlog->kstc_lock));
+       spin_lock(&(backlog->kstc_lock));
 
         if (backlog->child.kstc_accepted) {
 
@@ -6245,16 +6245,16 @@ again:
             parent->listener.kstc_listening.num--;
             backlog->child.kstc_queueno = 1;
 
-            cfs_spin_unlock(&(backlog->kstc_lock));
+           spin_unlock(&(backlog->kstc_lock));
 
             break;
         } else {
-            cfs_spin_unlock(&(backlog->kstc_lock));
+           spin_unlock(&(backlog->kstc_lock));
             backlog = NULL;
         }
     }
 
-    cfs_spin_unlock(&(parent->kstc_lock));
+       spin_unlock(&(parent->kstc_lock));
 
     /* we need wait until new incoming connections are requested
        or the case of shuting down the listenig daemon thread  */
@@ -6270,11 +6270,11 @@ again:
                 NULL
                 );
 
-        cfs_spin_lock(&(parent->kstc_lock));
+       spin_lock(&(parent->kstc_lock));
 
         /* check whether it's exptected to exit ? */
         if (!cfs_is_flag_set(parent->kstc_flags, KS_TCONN_DAEMON_STARTED)) {
-            cfs_spin_unlock(&(parent->kstc_lock));
+           spin_unlock(&(parent->kstc_lock));
         } else {
             goto again;
         }
@@ -6524,7 +6524,7 @@ int libcfs_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask)
     ks_addr_slot_t * slot = NULL;
     PLIST_ENTRY      list = NULL;
 
-    cfs_spin_lock(&ks_data.ksnd_addrs_lock);
+       spin_lock(&ks_data.ksnd_addrs_lock);
 
     list = ks_data.ksnd_addrs_list.Flink;
     while (list != &ks_data.ksnd_addrs_list) {
@@ -6539,7 +6539,7 @@ int libcfs_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask)
         slot = NULL;
     }
 
-    cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
+       spin_unlock(&ks_data.ksnd_addrs_lock);
 
     return (int)(slot == NULL);
 }
@@ -6550,7 +6550,7 @@ int libcfs_ipif_enumerate(char ***names)
     PLIST_ENTRY      list = NULL;
     int              nips = 0;
 
-    cfs_spin_lock(&ks_data.ksnd_addrs_lock);
+       spin_lock(&ks_data.ksnd_addrs_lock);
 
     *names = cfs_alloc(sizeof(char *) * ks_data.ksnd_naddrs, CFS_ALLOC_ZERO);
     if (*names == NULL) {
@@ -6569,7 +6569,7 @@ int libcfs_ipif_enumerate(char ***names)
 
 errorout:
 
-    cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
+       spin_unlock(&ks_data.ksnd_addrs_lock);
     return nips;
 }
 
@@ -6626,7 +6626,7 @@ void libcfs_sock_abort_accept(struct socket *sock)
 {
     LASSERT(sock->kstc_type == kstt_listener);
 
-    cfs_spin_lock(&(sock->kstc_lock));
+       spin_lock(&(sock->kstc_lock));
 
     /* clear the daemon flag */
     cfs_clear_flag(sock->kstc_flags, KS_TCONN_DAEMON_STARTED);
@@ -6634,7 +6634,7 @@ void libcfs_sock_abort_accept(struct socket *sock)
     /* wake up it from the waiting on new incoming connections */
     KeSetEvent(&sock->listener.kstc_accept_event, 0, FALSE);
 
-    cfs_spin_unlock(&(sock->kstc_lock));
+       spin_unlock(&(sock->kstc_lock));
 }
 
 /*
@@ -6718,7 +6718,7 @@ int libcfs_sock_getaddr(struct socket *socket, int remote, __u32 *ip, int *port)
 {
     PTRANSPORT_ADDRESS  taddr = NULL;
 
-    cfs_spin_lock(&socket->kstc_lock);
+       spin_lock(&socket->kstc_lock);
     if (remote) {
         if (socket->kstc_type == kstt_sender) {
             taddr = socket->sender.kstc_info.Remote;
@@ -6736,12 +6736,12 @@ int libcfs_sock_getaddr(struct socket *socket, int remote, __u32 *ip, int *port)
         if (port != NULL)
             *port = ntohs (addr->sin_port);
     } else {
-        cfs_spin_unlock(&socket->kstc_lock);
-        return -ENOTCONN;
-    }
+               spin_unlock(&socket->kstc_lock);
+               return -ENOTCONN;
+       }
 
-    cfs_spin_unlock(&socket->kstc_lock);
-    return 0;
+       spin_unlock(&socket->kstc_lock);
+       return 0;
 }
 
 int libcfs_sock_write(struct socket *sock, void *buffer, int nob, int timeout)
index 168c973..54c8783 100644 (file)
@@ -46,7 +46,7 @@ static unsigned int pages_factor[CFS_TCD_TYPE_MAX] = {
 
 char *cfs_trace_console_buffers[CFS_NR_CPUS][CFS_TCD_TYPE_MAX];
 
-cfs_rw_semaphore_t cfs_tracefile_sem;
+struct rw_semaphore cfs_tracefile_sem;
 
 int cfs_tracefile_init_arch()
 {
@@ -54,7 +54,7 @@ int cfs_tracefile_init_arch()
        int    j;
        struct cfs_trace_cpu_data *tcd;
 
-       cfs_init_rwsem(&cfs_tracefile_sem);
+       init_rwsem(&cfs_tracefile_sem);
 
        /* initialize trace_data */
        memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
@@ -111,27 +111,27 @@ void cfs_tracefile_fini_arch()
                cfs_trace_data[i] = NULL;
        }
 
-       cfs_fini_rwsem(&cfs_tracefile_sem);
+       fini_rwsem(&cfs_tracefile_sem);
 }
 
 void cfs_tracefile_read_lock()
 {
-       cfs_down_read(&cfs_tracefile_sem);
+       down_read(&cfs_tracefile_sem);
 }
 
 void cfs_tracefile_read_unlock()
 {
-       cfs_up_read(&cfs_tracefile_sem);
+       up_read(&cfs_tracefile_sem);
 }
 
 void cfs_tracefile_write_lock()
 {
-       cfs_down_write(&cfs_tracefile_sem);
+       down_write(&cfs_tracefile_sem);
 }
 
 void cfs_tracefile_write_unlock()
 {
-       cfs_up_write(&cfs_tracefile_sem);
+       up_write(&cfs_tracefile_sem);
 }
 
 cfs_trace_buf_type_t cfs_trace_buf_idx_get()
index 3ae1f45..811af88 100644 (file)
@@ -49,7 +49,7 @@ typedef struct cfs_wi_sched {
        cfs_list_t              ws_list;        /* chain on global list */
 #ifdef __KERNEL__
        /** serialised workitems */
-       cfs_spinlock_t          ws_lock;
+       spinlock_t              ws_lock;
        /** where schedulers sleep */
        cfs_waitq_t             ws_waitq;
 #endif
@@ -79,7 +79,7 @@ typedef struct cfs_wi_sched {
 
 struct cfs_workitem_data {
        /** serialize */
-       cfs_spinlock_t          wi_glock;
+       spinlock_t              wi_glock;
        /** list of all schedulers */
        cfs_list_t              wi_scheds;
        /** WI module is initialized */
@@ -92,13 +92,13 @@ struct cfs_workitem_data {
 static inline void
 cfs_wi_sched_lock(cfs_wi_sched_t *sched)
 {
-        cfs_spin_lock(&sched->ws_lock);
+       spin_lock(&sched->ws_lock);
 }
 
 static inline void
 cfs_wi_sched_unlock(cfs_wi_sched_t *sched)
 {
-        cfs_spin_unlock(&sched->ws_lock);
+       spin_unlock(&sched->ws_lock);
 }
 
 static inline int
@@ -123,13 +123,13 @@ cfs_wi_sched_cansleep(cfs_wi_sched_t *sched)
 static inline void
 cfs_wi_sched_lock(cfs_wi_sched_t *sched)
 {
-        cfs_spin_lock(&cfs_wi_data.wi_glock);
+       spin_lock(&cfs_wi_data.wi_glock);
 }
 
 static inline void
 cfs_wi_sched_unlock(cfs_wi_sched_t *sched)
 {
-        cfs_spin_unlock(&cfs_wi_data.wi_glock);
+       spin_unlock(&cfs_wi_data.wi_glock);
 }
 
 #endif /* __KERNEL__ */
@@ -262,13 +262,13 @@ cfs_wi_scheduler (void *arg)
        if (sched->ws_cptab != NULL)
                cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt);
 
-       cfs_spin_lock(&cfs_wi_data.wi_glock);
+       spin_lock(&cfs_wi_data.wi_glock);
 
        LASSERT(sched->ws_starting == 1);
        sched->ws_starting--;
        sched->ws_nthreads++;
 
-       cfs_spin_unlock(&cfs_wi_data.wi_glock);
+       spin_unlock(&cfs_wi_data.wi_glock);
 
        cfs_wi_sched_lock(sched);
 
@@ -328,11 +328,11 @@ cfs_wi_scheduler (void *arg)
 
         cfs_wi_sched_unlock(sched);
 
-       cfs_spin_lock(&cfs_wi_data.wi_glock);
+       spin_lock(&cfs_wi_data.wi_glock);
        sched->ws_nthreads--;
-       cfs_spin_unlock(&cfs_wi_data.wi_glock);
+       spin_unlock(&cfs_wi_data.wi_glock);
 
-        return 0;
+       return 0;
 }
 
 #else /* __KERNEL__ */
@@ -340,12 +340,12 @@ cfs_wi_scheduler (void *arg)
 int
 cfs_wi_check_events (void)
 {
-        int               n = 0;
-        cfs_workitem_t   *wi;
+       int               n = 0;
+       cfs_workitem_t   *wi;
 
-        cfs_spin_lock(&cfs_wi_data.wi_glock);
+       spin_lock(&cfs_wi_data.wi_glock);
 
-        for (;;) {
+       for (;;) {
                struct cfs_wi_sched     *sched = NULL;
                struct cfs_wi_sched     *tmp;
 
@@ -368,18 +368,18 @@ cfs_wi_check_events (void)
                LASSERT(sched->ws_nscheduled > 0);
                sched->ws_nscheduled--;
 
-                LASSERT (wi->wi_scheduled);
-                wi->wi_scheduled = 0;
-                cfs_spin_unlock(&cfs_wi_data.wi_glock);
+               LASSERT(wi->wi_scheduled);
+               wi->wi_scheduled = 0;
+               spin_unlock(&cfs_wi_data.wi_glock);
 
-                n++;
-                (*wi->wi_action) (wi);
+               n++;
+               (*wi->wi_action) (wi);
 
-                cfs_spin_lock(&cfs_wi_data.wi_glock);
-        }
+               spin_lock(&cfs_wi_data.wi_glock);
+       }
 
-        cfs_spin_unlock(&cfs_wi_data.wi_glock);
-        return n;
+       spin_unlock(&cfs_wi_data.wi_glock);
+       return n;
 }
 
 #endif
@@ -392,37 +392,37 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
        LASSERT(cfs_wi_data.wi_init);
        LASSERT(!cfs_wi_data.wi_stopping);
 
-       cfs_spin_lock(&cfs_wi_data.wi_glock);
+       spin_lock(&cfs_wi_data.wi_glock);
        if (sched->ws_stopping) {
                CDEBUG(D_INFO, "%s is in progress of stopping\n",
                       sched->ws_name);
-               cfs_spin_unlock(&cfs_wi_data.wi_glock);
+               spin_unlock(&cfs_wi_data.wi_glock);
                return;
        }
 
        LASSERT(!cfs_list_empty(&sched->ws_list));
        sched->ws_stopping = 1;
 
-       cfs_spin_unlock(&cfs_wi_data.wi_glock);
+       spin_unlock(&cfs_wi_data.wi_glock);
 
        i = 2;
 #ifdef __KERNEL__
        cfs_waitq_broadcast(&sched->ws_waitq);
 
-       cfs_spin_lock(&cfs_wi_data.wi_glock);
+       spin_lock(&cfs_wi_data.wi_glock);
        while (sched->ws_nthreads > 0) {
                CDEBUG(IS_PO2(++i) ? D_WARNING : D_NET,
                       "waiting for %d threads of WI sched[%s] to terminate\n",
                       sched->ws_nthreads, sched->ws_name);
 
-               cfs_spin_unlock(&cfs_wi_data.wi_glock);
+               spin_unlock(&cfs_wi_data.wi_glock);
                cfs_pause(cfs_time_seconds(1) / 20);
-               cfs_spin_lock(&cfs_wi_data.wi_glock);
+               spin_lock(&cfs_wi_data.wi_glock);
        }
 
        cfs_list_del(&sched->ws_list);
 
-       cfs_spin_unlock(&cfs_wi_data.wi_glock);
+       spin_unlock(&cfs_wi_data.wi_glock);
 #else
        SET_BUT_UNUSED(i);
 #endif
@@ -453,7 +453,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
        sched->ws_cpt = cpt;
 
 #ifdef __KERNEL__
-       cfs_spin_lock_init(&sched->ws_lock);
+       spin_lock_init(&sched->ws_lock);
        cfs_waitq_init(&sched->ws_waitq);
 #endif
        CFS_INIT_LIST_HEAD(&sched->ws_runq);
@@ -463,15 +463,15 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
        rc = 0;
 #ifdef __KERNEL__
        while (nthrs > 0)  {
-               cfs_spin_lock(&cfs_wi_data.wi_glock);
+               spin_lock(&cfs_wi_data.wi_glock);
                while (sched->ws_starting > 0) {
-                       cfs_spin_unlock(&cfs_wi_data.wi_glock);
+                       spin_unlock(&cfs_wi_data.wi_glock);
                        cfs_schedule();
-                       cfs_spin_lock(&cfs_wi_data.wi_glock);
+                       spin_lock(&cfs_wi_data.wi_glock);
                }
 
                sched->ws_starting++;
-               cfs_spin_unlock(&cfs_wi_data.wi_glock);
+               spin_unlock(&cfs_wi_data.wi_glock);
 
                rc = cfs_create_thread(cfs_wi_scheduler, sched, 0);
                if (rc >= 0) {
@@ -482,13 +482,13 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
                CERROR("Failed to create thread for WI scheduler %s: %d\n",
                       name, rc);
 
-               cfs_spin_lock(&cfs_wi_data.wi_glock);
+               spin_lock(&cfs_wi_data.wi_glock);
 
                /* make up for cfs_wi_sched_destroy */
                cfs_list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
                sched->ws_starting--;
 
-               cfs_spin_unlock(&cfs_wi_data.wi_glock);
+               spin_unlock(&cfs_wi_data.wi_glock);
 
                cfs_wi_sched_destroy(sched);
                return rc;
@@ -496,9 +496,9 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
 #else
        SET_BUT_UNUSED(rc);
 #endif
-       cfs_spin_lock(&cfs_wi_data.wi_glock);
+       spin_lock(&cfs_wi_data.wi_glock);
        cfs_list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
-       cfs_spin_unlock(&cfs_wi_data.wi_glock);
+       spin_unlock(&cfs_wi_data.wi_glock);
 
        *sched_pp = sched;
        return 0;
@@ -510,7 +510,7 @@ cfs_wi_startup(void)
 {
        memset(&cfs_wi_data, 0, sizeof(cfs_wi_data));
 
-       cfs_spin_lock_init(&cfs_wi_data.wi_glock);
+       spin_lock_init(&cfs_wi_data.wi_glock);
        CFS_INIT_LIST_HEAD(&cfs_wi_data.wi_scheds);
        cfs_wi_data.wi_init = 1;
 
@@ -522,9 +522,9 @@ cfs_wi_shutdown (void)
 {
        struct cfs_wi_sched     *sched;
 
-       cfs_spin_lock(&cfs_wi_data.wi_glock);
+       spin_lock(&cfs_wi_data.wi_glock);
        cfs_wi_data.wi_stopping = 1;
-       cfs_spin_unlock(&cfs_wi_data.wi_glock);
+       spin_unlock(&cfs_wi_data.wi_glock);
 
 #ifdef __KERNEL__
        /* nobody should contend on this list */
@@ -534,14 +534,14 @@ cfs_wi_shutdown (void)
        }
 
        cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
-               cfs_spin_lock(&cfs_wi_data.wi_glock);
+               spin_lock(&cfs_wi_data.wi_glock);
 
                while (sched->ws_nthreads != 0) {
-                       cfs_spin_unlock(&cfs_wi_data.wi_glock);
+                       spin_unlock(&cfs_wi_data.wi_glock);
                        cfs_pause(cfs_time_seconds(1) / 20);
-                       cfs_spin_lock(&cfs_wi_data.wi_glock);
+                       spin_lock(&cfs_wi_data.wi_glock);
                }
-               cfs_spin_unlock(&cfs_wi_data.wi_glock);
+               spin_unlock(&cfs_wi_data.wi_glock);
        }
 #endif
        while (!cfs_list_empty(&cfs_wi_data.wi_scheds)) {
index 90e6b32..259fbaf 100644 (file)
@@ -175,14 +175,14 @@ lnet_net_lock_current(void)
 
 #ifdef __KERNEL__
 
-#define lnet_ptl_lock(ptl)     cfs_spin_lock(&(ptl)->ptl_lock)
-#define lnet_ptl_unlock(ptl)   cfs_spin_unlock(&(ptl)->ptl_lock)
-#define lnet_eq_wait_lock()    cfs_spin_lock(&the_lnet.ln_eq_wait_lock)
-#define lnet_eq_wait_unlock()  cfs_spin_unlock(&the_lnet.ln_eq_wait_lock)
-#define lnet_ni_lock(ni)       cfs_spin_lock(&(ni)->ni_lock)
-#define lnet_ni_unlock(ni)     cfs_spin_unlock(&(ni)->ni_lock)
-#define LNET_MUTEX_LOCK(m)     cfs_mutex_lock(m)
-#define LNET_MUTEX_UNLOCK(m)   cfs_mutex_unlock(m)
+#define lnet_ptl_lock(ptl)     spin_lock(&(ptl)->ptl_lock)
+#define lnet_ptl_unlock(ptl)   spin_unlock(&(ptl)->ptl_lock)
+#define lnet_eq_wait_lock()    spin_lock(&the_lnet.ln_eq_wait_lock)
+#define lnet_eq_wait_unlock()  spin_unlock(&the_lnet.ln_eq_wait_lock)
+#define lnet_ni_lock(ni)       spin_lock(&(ni)->ni_lock)
+#define lnet_ni_unlock(ni)     spin_unlock(&(ni)->ni_lock)
+#define LNET_MUTEX_LOCK(m)     mutex_lock(m)
+#define LNET_MUTEX_UNLOCK(m)   mutex_unlock(m)
 
 #else /* !__KERNEL__ */
 
index 4d3d614..24e3c1a 100644 (file)
@@ -412,7 +412,7 @@ struct lnet_tx_queue {
 
 typedef struct lnet_ni {
 #ifdef __KERNEL__
-       cfs_spinlock_t          ni_lock;
+       spinlock_t              ni_lock;
 #else
 # ifndef HAVE_LIBPTHREAD
        int                     ni_lock;
@@ -638,7 +638,7 @@ struct lnet_match_table {
 
 typedef struct lnet_portal {
 #ifdef __KERNEL__
-       cfs_spinlock_t          ptl_lock;
+       spinlock_t              ptl_lock;
 #else
 # ifndef HAVE_LIBPTHREAD
        int                     ptl_lock;
@@ -721,7 +721,7 @@ typedef struct
        struct lnet_res_container       ln_eq_container;
 #ifdef __KERNEL__
        cfs_waitq_t                     ln_eq_waitq;
-       cfs_spinlock_t                  ln_eq_wait_lock;
+       spinlock_t                      ln_eq_wait_lock;
 #else
 # ifndef HAVE_LIBPTHREAD
        int                             ln_eq_wait_lock;
@@ -773,10 +773,10 @@ typedef struct
        cfs_list_t                      ln_rcd_zombie;
 #ifdef __KERNEL__
        /* serialise startup/shutdown */
-       cfs_semaphore_t                 ln_rc_signal;
+       struct semaphore                ln_rc_signal;
 
-       cfs_mutex_t                     ln_api_mutex;
-       cfs_mutex_t                     ln_lnd_mutex;
+       struct mutex                    ln_api_mutex;
+       struct mutex                    ln_lnd_mutex;
 #else
 # ifndef HAVE_LIBPTHREAD
        int                             ln_api_mutex;
index 7c0a8d1..028945a 100644 (file)
@@ -63,9 +63,9 @@ mxlnd_free_pages(kmx_pages_t *p)
         for (i = 0; i < npages; i++) {
                 if (p->mxg_pages[i] != NULL) {
                         __free_page(p->mxg_pages[i]);
-                        cfs_spin_lock(&kmxlnd_data.kmx_mem_lock);
-                        kmxlnd_data.kmx_mem_used -= PAGE_SIZE;
-                        cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock);
+                       spin_lock(&kmxlnd_data.kmx_mem_lock);
+                       kmxlnd_data.kmx_mem_used -= PAGE_SIZE;
+                       spin_unlock(&kmxlnd_data.kmx_mem_lock);
                 }
         }
 
@@ -96,9 +96,9 @@ mxlnd_alloc_pages(kmx_pages_t **pp, int npages)
                         mxlnd_free_pages(p);
                         return -ENOMEM;