--- /dev/null
+#!/bin/sed -f
+
+# Script to clean up libcfs macros; it runs against the tree at build time.
+# Migrate libcfs to emulate Linux kernel APIs.
+# http://jira.whamcloud.com/browse/LU-1346
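+#
+# Annotation (illustrative sketch, not an extra rule): each symbol below is
+# handled by a pair of commands -- an s/// command that renames the cfs_
+# wrapper to the plain kernel name, and a /.../d command that deletes the
+# compatibility #define once the rename has made it self-referential, e.g.
+#     s/\bcfs_spin_lock\b/spin_lock/g
+#     /#[ \t]*define[ \t]*\bspin_lock\b *( *\w* *)[ \t]*\bspin_lock\b *( *\w* *)/d
+# turn "cfs_spin_lock(&lock)" into "spin_lock(&lock)" and then remove the
+# "#define spin_lock(lock) spin_lock(lock)" left behind in the compat headers.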
+
+# remove extra blank line
+# /^$/{N;/^\n$/D}
+
+################################################################################
+# lock - spinlock, rw_semaphore, rwlock, completion, semaphore, mutex
+# - lock_kernel, unlock_kernel, lockdep
+
+# spinlock
+/typedef *spinlock_t *cfs_spinlock_t;/d
+s/\bcfs_spinlock_t\b/spinlock_t/g
+s/\bcfs_spin_lock_init\b/spin_lock_init/g
+/#[ \t]*define[ \t]*\bspin_lock_init\b *( *\w* *)[ \t]*\bspin_lock_init\b *( *\w* *)/d
+s/\bcfs_spin_lock\b/spin_lock/g
+/#[ \t]*define[ \t]*\bspin_lock\b *( *\w* *)[ \t]*\bspin_lock\b *( *\w* *)/d
+s/\bcfs_spin_lock_bh\b/spin_lock_bh/g
+/#[ \t]*define[ \t]*\bspin_lock_bh\b *( *\w* *)[ \t]*\bspin_lock_bh\b *( *\w* *)/d
+s/\bcfs_spin_lock_bh_init\b/spin_lock_bh_init/g
+/#[ \t]*define[ \t]*\bspin_lock_bh_init\b *( *\w* *)[ \t]*\bspin_lock_bh_init\b *( *\w* *)/d
+s/\bcfs_spin_unlock\b/spin_unlock/g
+/#[ \t]*define[ \t]*\bspin_unlock\b *( *\w* *)[ \t]*\bspin_unlock\b *( *\w* *)/d
+s/\bcfs_spin_unlock_bh\b/spin_unlock_bh/g
+/#[ \t]*define[ \t]*\bspin_unlock_bh\b *( *\w* *)[ \t]*\bspin_unlock_bh\b *( *\w* *)/d
+s/\bcfs_spin_trylock\b/spin_trylock/g
+/#[ \t]*define[ \t]*\bspin_trylock\b *( *\w* *)[ \t]*\bspin_trylock\b *( *\w* *)/d
+s/\bcfs_spin_is_locked\b/spin_is_locked/g
+/#[ \t]*define[ \t]*\bspin_is_locked\b *( *\w* *)[ \t]*\bspin_is_locked\b *( *\w* *)/d
+
+s/\bcfs_spin_lock_irq\b/spin_lock_irq/g
+/#[ \t]*define[ \t]*\bspin_lock_irq\b *( *\w* *)[ \t]*\bspin_lock_irq\b *( *\w* *)/d
+s/\bcfs_spin_unlock_irq\b/spin_unlock_irq/g
+/#[ \t]*define[ \t]*\bspin_unlock_irq\b *( *\w* *)[ \t]*\bspin_unlock_irq\b *( *\w* *)/d
+s/\bcfs_read_lock_irqsave\b/read_lock_irqsave/g
+/#[ \t]*define[ \t]*\bread_lock_irqsave\b *( *\w* *, *\w* *)[ \t]*\bread_lock_irqsave\b *( *\w* *, *\w* *)/d
+s/\bcfs_write_lock_irqsave\b/write_lock_irqsave/g
+/#[ \t]*define[ \t]*\bwrite_lock_irqsave\b *( *\w* *, *\w* *)[ \t]*\bwrite_lock_irqsave\b *( *\w* *, *\w* *)/d
+s/\bcfs_write_unlock_irqrestore\b/write_unlock_irqrestore/g
+/#[ \t]*define[ \t]*\bwrite_unlock_irqrestore\b *( *\w* *, *\w* *)[ \t]*\bwrite_unlock_irqrestore\b *( *\w* *, *\w* *)/d
+s/\bcfs_spin_lock_irqsave\b/spin_lock_irqsave/g
+/#[ \t]*define[ \t]*\bspin_lock_irqsave\b *( *\w* *, *\w* *)[ \t]*\bspin_lock_irqsave\b *( *\w* *, *\w* *)/d
+s/\bcfs_spin_unlock_irqrestore\b/spin_unlock_irqrestore/g
+/#[ \t]*define[ \t]*\bspin_unlock_irqrestore\b *( *\w* *, *\w* *)[ \t]*\bspin_unlock_irqrestore\b *( *\w* *, *\w* *)/d
+s/\bCFS_SPIN_LOCK_UNLOCKED\b/SPIN_LOCK_UNLOCKED/g
+/#[ \t]*define[ \t]*\bSPIN_LOCK_UNLOCKED\b[ \t]*\bSPIN_LOCK_UNLOCKED\b/d
+
+# rw_semaphore
+s/\bcfs_rw_semaphore_t\b/struct rw_semaphore/g
+s/\bcfs_init_rwsem\b/init_rwsem/g
+/#[ \t]*define[ \t]*\binit_rwsem\b *( *\w* *)[ \t]*\binit_rwsem\b *( *\w* *)/d
+s/\bcfs_down_read\b/down_read/g
+/#[ \t]*define[ \t]*\bdown_read\b *( *\w* *)[ \t]*\bdown_read\b *( *\w* *)/d
+s/\bcfs_down_read_trylock\b/down_read_trylock/g
+/#[ \t]*define[ \t]*\bdown_read_trylock\b *( *\w* *)[ \t]*\bdown_read_trylock\b *( *\w* *)/d
+s/\bcfs_up_read\b/up_read/g
+/#[ \t]*define[ \t]*\bup_read\b *( *\w* *)[ \t]*\bup_read\b *( *\w* *)/d
+s/\bcfs_down_write\b/down_write/g
+/#[ \t]*define[ \t]*\bdown_write\b *( *\w* *)[ \t]*\bdown_write\b *( *\w* *)/d
+s/\bcfs_down_write_trylock\b/down_write_trylock/g
+/#[ \t]*define[ \t]*\bdown_write_trylock\b *( *\w* *)[ \t]*\bdown_write_trylock\b *( *\w* *)/d
+s/\bcfs_up_write\b/up_write/g
+/#[ \t]*define[ \t]*\bup_write\b *( *\w* *)[ \t]*\bup_write\b *( *\w* *)/d
+s/\bcfs_fini_rwsem\b/fini_rwsem/g
+s/\bCFS_DECLARE_RWSEM\b/DECLARE_RWSEM/g
+/#[ \t]*define[ \t]*\bDECLARE_RWSEM\b *( *\w* *)[ \t]*\bDECLARE_RWSEM\b *( *\w* *)/d
+
+s/\bcfs_semaphore\b/semaphore/g
+s/\bcfs_rw_semaphore\b/rw_semaphore/g
+s/\bcfs_init_completion_module\b/init_completion_module/g
+s/\bcfs_call_wait_handler\b/call_wait_handler/g
+s/\bcfs_wait_handler_t\b/wait_handler_t/g
+s/\bcfs_mt_completion_t\b/mt_completion_t/g
+s/\bcfs_mt_init_completion\b/mt_init_completion/g
+s/\bcfs_mt_wait_for_completion\b/mt_wait_for_completion/g
+s/\bcfs_mt_complete\b/mt_complete/g
+s/\bcfs_mt_fini_completion\b/mt_fini_completion/g
+s/\bcfs_mt_atomic_t\b/mt_atomic_t/g
+s/\bcfs_mt_atomic_read\b/mt_atomic_read/g
+s/\bcfs_mt_atomic_set\b/mt_atomic_set/g
+s/\bcfs_mt_atomic_dec_and_test\b/mt_atomic_dec_and_test/g
+s/\bcfs_mt_atomic_inc\b/mt_atomic_inc/g
+s/\bcfs_mt_atomic_dec\b/mt_atomic_dec/g
+s/\bcfs_mt_atomic_add\b/mt_atomic_add/g
+s/\bcfs_mt_atomic_sub\b/mt_atomic_sub/g
+
+# rwlock
+/typedef *rwlock_t *cfs_rwlock_t;/d
+s/\bcfs_rwlock_t\b/rwlock_t/g
+s/\bcfs_rwlock_init\b/rwlock_init/g
+/#[ \t]*define[ \t]*\brwlock_init\b *( *\w* *)[ \t]*\brwlock_init\b *( *\w* *)/d
+s/\bcfs_read_lock\b/read_lock/g
+/#[ \t]*define[ \t]*\bread_lock\b *( *\w* *)[ \t]*\bread_lock\b *( *\w* *)/d
+s/\bcfs_read_unlock\b/read_unlock/g
+/#[ \t]*define[ \t]*\bread_unlock\b *( *\w* *)[ \t]*\bread_unlock\b *( *\w* *)/d
+s/\bcfs_read_unlock_irqrestore\b/read_unlock_irqrestore/g
+#/#[ \t]*define[ \t]*\bread_unlock_irqrestore\b *( *\w* *)[ \t]*\bread_unlock_irqrestore\b *( *\w* *)/d
+/#define read_unlock_irqrestore(lock,flags) \\/{N;d}
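+# Annotation: the single-line form above is commented out; this compat #define
+# spans two lines (backslash continuation), so the rule here anchors on the
+# first line, appends the next one with N, and deletes both.  For example,
+# once the s/// rename has run, the two-line macro
+#     #define cfs_read_unlock_irqrestore(lock,flags) \
+#             read_unlock_irqrestore(lock, flags)
+# becomes self-referential and is removed.  The same {N;d} idiom is used below
+# for wait_for_completion_interruptible, mutex_lock_nested and
+# find_next_zero_bit.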
+s/\bcfs_write_lock\b/write_lock/g
+/#[ \t]*define[ \t]*\bwrite_lock\b *( *\w* *)[ \t]*\bwrite_lock\b *( *\w* *)/d
+s/\bcfs_write_unlock\b/write_unlock/g
+/#[ \t]*define[ \t]*\bwrite_unlock\b *( *\w* *)[ \t]*\bwrite_unlock\b *( *\w* *)/d
+s/\bcfs_write_lock_bh\b/write_lock_bh/g
+/#[ \t]*define[ \t]*\bwrite_lock_bh\b *( *\w* *)[ \t]*\bwrite_lock_bh\b *( *\w* *)/d
+s/\bcfs_write_unlock_bh\b/write_unlock_bh/g
+/#[ \t]*define[ \t]*\bwrite_unlock_bh\b *( *\w* *)[ \t]*\bwrite_unlock_bh\b *( *\w* *)/d
+s/\bCFS_RW_LOCK_UNLOCKED\b/RW_LOCK_UNLOCKED/g
+/#[ \t]*define[ \t]*\bRW_LOCK_UNLOCKED\b *\bRW_LOCK_UNLOCKED\b */d
+
+# completion
+s/\bcfs_completion_t\b/struct completion/g
+s/\bCFS_DECLARE_COMPLETION\b/DECLARE_COMPLETION/g
+/#[ \t]*define[ \t]*\bDECLARE_COMPLETION\b *( *\w* *)[ \t]*\bDECLARE_COMPLETION\b *( *\w* *)/d
+s/\bCFS_INIT_COMPLETION\b/INIT_COMPLETION/g
+/#[ \t]*define[ \t]*\bINIT_COMPLETION\b *( *\w* *)[ \t]*\bINIT_COMPLETION\b *( *\w* *)/d
+s/\bCFS_COMPLETION_INITIALIZER\b/COMPLETION_INITIALIZER/g
+/#[ \t]*define[ \t]*\bCOMPLETION_INITIALIZER\b *( *\w* *)[ \t]*\bCOMPLETION_INITIALIZER\b *( *\w* *)/d
+s/\bcfs_init_completion\b/init_completion/g
+/#[ \t]*define[ \t]*\binit_completion\b *( *\w* *)[ \t]*\binit_completion\b *( *\w* *)/d
+s/\bcfs_complete\b/complete/g
+/#[ \t]*define[ \t]*\bcomplete\b *( *\w* *)[ \t]*\bcomplete\b *( *\w* *)/d
+s/\bcfs_wait_for_completion\b/wait_for_completion/g
+/#[ \t]*define[ \t]*\bwait_for_completion\b *( *\w* *)[ \t]*\bwait_for_completion\b *( *\w* *)/d
+s/\bcfs_wait_for_completion_interruptible\b/wait_for_completion_interruptible/g
+/#define wait_for_completion_interruptible(c) \\/{N;d}
+s/\bcfs_complete_and_exit\b/complete_and_exit/g
+/#[ \t]*define[ \t]*\bcomplete_and_exit\b *( *\w* *, *\w* *)[ \t]*\bcomplete_and_exit\b *( *\w* *, *\w* *)/d
+s/\bcfs_fini_completion\b/fini_completion/g
+
+# semaphore
+s/\bcfs_semaphore_t\b/struct semaphore/g
+s/\bCFS_DEFINE_SEMAPHORE\b/DEFINE_SEMAPHORE/g
+/#[ \t]*define[ \t]*\bDEFINE_SEMAPHORE\b *( *\w* *)[ \t]*\bDEFINE_SEMAPHORE\b *( *\w* *)/d
+s/\bcfs_sema_init\b/sema_init/g
+/#[ \t]*define[ \t]*\bsema_init\b *( *\w* *, *\w* *)[ \t]*\bsema_init\b *( *\w* *, *\w* *)/d
+s/\bcfs_up\b/up/g
+/#[ \t]*define[ \t]*\bup\b *( *\w* *)[ \t]*\bup\b *( *\w* *)/d
+s/\bcfs_down\b/down/g
+/#[ \t]*define[ \t]*\bdown\b *( *\w* *)[ \t]*\bdown\b *( *\w* *)/d
+s/\bcfs_down_interruptible\b/down_interruptible/g
+/#[ \t]*define[ \t]*\bdown_interruptible\b *( *\w* *)[ \t]*\bdown_interruptible\b *( *\w* *)/d
+s/\bcfs_down_trylock\b/down_trylock/g
+/#[ \t]*define[ \t]*\bdown_trylock\b *( *\w* *)[ \t]*\bdown_trylock\b *( *\w* *)/d
+
+# mutex
+s/\bcfs_mutex_t\b/struct mutex/g
+s/\bCFS_DEFINE_MUTEX\b/DEFINE_MUTEX/g
+/#[ \t]*define[ \t]*\bDEFINE_MUTEX\b *( *\w* *)[ \t]*\bDEFINE_MUTEX\b *( *\w* *)/d
+s/\bcfs_mutex_init\b/mutex_init/g
+/#[ \t]*define[ \t]*\bmutex_init\b *( *\w* *)[ \t]*\bmutex_init\b *( *\w* *)/d
+s/\bcfs_mutex_lock\b/mutex_lock/g
+/#[ \t]*define[ \t]*\bmutex_lock\b *( *\w* *)[ \t]*\bmutex_lock\b *( *\w* *)/d
+s/\bcfs_mutex_unlock\b/mutex_unlock/g
+/#[ \t]*define[ \t]*\bmutex_unlock\b *( *\w* *)[ \t]*\bmutex_unlock\b *( *\w* *)/d
+s/\bcfs_mutex_lock_interruptible\b/mutex_lock_interruptible/g
+/#[ \t]*define[ \t]*\bmutex_lock_interruptible\b *( *\w* *)[ \t]*\bmutex_lock_interruptible\b *( *\w* *)/d
+s/\bcfs_mutex_trylock\b/mutex_trylock/g
+/#[ \t]*define[ \t]*\bmutex_trylock\b *( *\w* *)[ \t]*\bmutex_trylock\b *( *\w* *)/d
+s/\bcfs_mutex_is_locked\b/mutex_is_locked/g
+/#[ \t]*define[ \t]*\bmutex_is_locked\b *( *\w* *)[ \t]*\bmutex_is_locked\b *( *\w* *)/d
+s/\bcfs_mutex_destroy\b/mutex_destroy/g
+/#[ \t]*define[ \t]*\bmutex_destroy\b *( *\w* *)[ \t]*\bmutex_destroy\b *( *\w* *)/d
+
+# lock_kernel, unlock_kernel
+# s/\bcfs_lock_kernel\b/lock_kernel/g
+# /#[ \t]*define[ \t]*\block_kernel\b *( *)[ \t]*\block_kernel\b *( *)/d
+# s/\bcfs_unlock_kernel\b/unlock_kernel/g
+# /#[ \t]*define[ \t]*\bunlock_kernel\b *( *)[ \t]*\bunlock_kernel\b *( *)/d
+
+# lockdep
+s/\bcfs_lock_class_key\b/lock_class_key/g
+s/\bcfs_lock_class_key_t\b/struct lock_class_key/g
+s/\bcfs_lockdep_set_class\b/lockdep_set_class/g
+s/\bcfs_lockdep_off\b/lockdep_off/g
+s/\bcfs_lockdep_on\b/lockdep_on/g
+/#[ \t]*define[ \t]*\blockdep_off\b *( *)[ \t]*\blockdep_off\b *( *)/d
+/#[ \t]*define[ \t]*\blockdep_on\b *( *)[ \t]*\blockdep_on\b *( *)/d
+/#[ \t]*define[ \t]*\blockdep_set_class\b *( *\w* *, *\w* *)[ \t]*\blockdep_set_class\b *( *\w* *, *\w* *)/d
+
+s/\bcfs_mutex_lock_nested\b/mutex_lock_nested/g
+#/#[ \t]*define[ \t]*\bmutex_lock_nested\b *( *\w* *, *\w* *)[ \t]*\bmutex_lock_nested\b *( *\w* *, *\w* *)/d
+/#define mutex_lock_nested(mutex, subclass) \\/{N;d}
+s/\bcfs_spin_lock_nested\b/spin_lock_nested/g
+/#[ \t]*define[ \t]*\bspin_lock_nested\b *( *\w* *, *\w* *)[ \t]*\bspin_lock_nested\b *( *\w* *, *\w* *)/d
+s/\bcfs_down_read_nested\b/down_read_nested/g
+/#[ \t]*define[ \t]*\bdown_read_nested\b *( *\w* *, *\w* *)[ \t]*\bdown_read_nested\b *( *\w* *, *\w* *)/d
+s/\bcfs_down_write_nested\b/down_write_nested/g
+/#[ \t]*define[ \t]*\bdown_write_nested\b *( *\w* *, *\w* *)[ \t]*\bdown_write_nested\b *( *\w* *, *\w* *)/d
+
+###############################################################################
+# bitops
+
+s/\bcfs_test_bit\b/test_bit/g
+/#[ \t]*define[ \t]*\btest_bit\b *( *\w* *, *\w* *)[ \t]*\btest_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_set_bit\b/set_bit/g
+/#[ \t]*define[ \t]*\bset_bit\b *( *\w* *, *\w* *)[ \t]*\bset_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_clear_bit\b/clear_bit/g
+/#[ \t]*define[ \t]*\bclear_bit\b *( *\w* *, *\w* *)[ \t]*\bclear_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_test_and_set_bit\b/test_and_set_bit/g
+/#[ \t]*define[ \t]*\btest_and_set_bit\b *( *\w* *, *\w* *)[ \t]*\btest_and_set_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_test_and_clear_bit\b/test_and_clear_bit/g
+/#[ \t]*define[ \t]*\btest_and_clear_bit\b *( *\w* *, *\w* *)[ \t]*\btest_and_clear_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_find_first_bit\b/find_first_bit/g
+/#[ \t]*define[ \t]*\bfind_first_bit\b *( *\w* *, *\w* *)[ \t]*\bfind_first_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_find_first_zero_bit\b/find_first_zero_bit/g
+/#[ \t]*define[ \t]*\bfind_first_zero_bit\b *( *\w* *, *\w* *)[ \t]*\bfind_first_zero_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_find_next_bit\b/find_next_bit/g
+/#[ \t]*define[ \t]*\bfind_next_bit\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bfind_next_bit\b *( *\w* *, *\w* *, *\w* *)/d
+s/\bcfs_find_next_zero_bit\b/find_next_zero_bit/g
+/#define find_next_zero_bit(addr, size, off) \\/{N;d}
+s/\bcfs_ffz\b/ffz/g
+/#[ \t]*define[ \t]*\bffz\b *( *\w* *)[ \t]*\bffz\b *( *\w* *)/d
+s/\bcfs_ffs\b/ffs/g
+/#[ \t]*define[ \t]*\bffs\b *( *\w* *)[ \t]*\bffs\b *( *\w* *)/d
+s/\bcfs_fls\b/fls/g
+/#[ \t]*define[ \t]*\bfls\b *( *\w* *)[ \t]*\bfls\b *( *\w* *)/d
+
+################################################################################
+# file operations
+
+#s/\bcfs_file_t\b/file_t/g
+#s/\bcfs_dentry_t\b/dentry_t/g
+#s/\bcfs_dirent_t\b/dirent_t/g
+#s/\bcfs_kstatfs_t\b/kstatfs_t/g
+#s/\bcfs_filp_size\b/filp_size/g
+#s/\bcfs_filp_poff\b/filp_poff/g
+#s/\bcfs_filp_open\b/filp_open/g
+#/#[ \t]*define[ \t]*\bfilp_open\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bfilp_open\b *( *\w* *, *\w* *, *\w* *)/d
+#s/\bcfs_do_fsync\b/do_fsync/g
+#s/\bcfs_filp_close\b/filp_close/g
+#/#[ \t]*define[ \t]*\bfilp_close\b *( *\w* *, *\w* *)[ \t]*\bfilp_close\b *( *\w* *, *\w* *)/d
+#s/\bcfs_filp_read\b/filp_read/g
+#s/\bcfs_filp_write\b/filp_write/g
+#s/\bcfs_filp_fsync\b/filp_fsync/g
+#s/\bcfs_get_file\b/get_file/g
+#/#[ \t]*define[ \t]*\bget_file\b *( *\w* *)[ \t]*\bget_file\b *( *\w* *)/d
+#s/\bcfs_get_fd\b/fget/g
+#/#[ \t]*define[ \t]*\bfget\b *( *\w* *)[ \t]*\bfget\b *( *\w* *)/d
+#s/\bcfs_put_file\b/fput/g
+#/#[ \t]*define[ \t]*\bfput\b *( *\w* *)[ \t]*\bfput\b *( *\w* *)/d
+#s/\bcfs_file_count\b/file_count/g
+#/#[ \t]*define[ \t]*\bfile_count\b *( *\w* *)[ \t]*\bfile_count\b *( *\w* *)/d
+#s/\bCFS_INT_LIMIT\b/INT_LIMIT/g
+#s/\bCFS_OFFSET_MAX\b/OFFSET_MAX/g
+#s/\bcfs_flock_t\b/flock_t/g
+#s/\bcfs_flock_type\b/flock_type/g
+#s/\bcfs_flock_set_type\b/flock_set_type/g
+#s/\bcfs_flock_pid\b/flock_pid/g
+#s/\bcfs_flock_set_pid\b/flock_set_pid/g
+#s/\bcfs_flock_start\b/flock_start/g
+#s/\bcfs_flock_set_start\b/flock_set_start/g
+#s/\bcfs_flock_end\b/flock_end/g
+#s/\bcfs_flock_set_end\b/flock_set_end/g
+#s/\bcfs_user_write\b/user_write/g
+#s/\bCFS_IFSHIFT\b/IFSHIFT/g
+#s/\bCFS_IFTODT\b/IFTODT/g
+#s/\bCFS_DTTOIF\b/DTTOIF/g
+
+################################################################################
+# memory operations
+
+#s/\bcfs_page_t\b/page_t/g
+#s/\bCFS_PAGE_SIZE\b/PAGE_CACHE_SIZE/g
+#/#[ \t]*define[ \t]*\bPAGE_CACHE_SIZE\b[ \t]*\bPAGE_CACHE_SIZE\b/d
+#s/\bCFS_PAGE_SHIFT\b/PAGE_CACHE_SHIFT/g
+#/#[ \t]*define[ \t]*\bPAGE_CACHE_SHIFT\b[ \t]*\bPAGE_CACHE_SHIFT\b/d
+#s/\bCFS_PAGE_MASK\b/PAGE_CACHE_MASK/g
+#/#[ \t]*define[ \t]*\bPAGE_CACHE_MASK\b[ \t]*\bPAGE_CACHE_MASK\b/d
+#s/\bcfs_num_physpages\b/num_physpages/g
+#/#[ \t]*define[ \t]*\bnum_physpages\b[ \t]*\bnum_physpages\b/d
+#s/\bcfs_copy_from_user\b/copy_from_user/g
+#/#[ \t]*define[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)/d
+#s/\bcfs_copy_to_user\b/copy_to_user/g
+#/#[ \t]*define[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)/d
+#s/\bcfs_page_address\b/page_address/g
+#/#[ \t]*define[ \t]*\bpage_address\b *( *\w* *)[ \t]*\bpage_address\b *( *\w* *)/d
+#s/\bcfs_kmap\b/kmap/g
+#/#[ \t]*define[ \t]*\bkmap\b *( *\w* *)[ \t]*\bkmap\b *( *\w* *)/d
+#s/\bcfs_kunmap\b/kunmap/g
+#/#[ \t]*define[ \t]*\bkunmap\b *( *\w* *)[ \t]*\bkunmap\b *( *\w* *)/d
+#s/\bcfs_get_page\b/get_page/g
+#/#[ \t]*define[ \t]*\bget_page\b *( *\w* *)[ \t]*\bget_page\b *( *\w* *)/d
+#s/\bcfs_page_count\b/page_count/g
+#/#[ \t]*define[ \t]*\bpage_count\b *( *\w* *)[ \t]*\bpage_count\b *( *\w* *)/d
+#s/\bcfs_page_index\b/page_index/g
+#/#[ \t]*define[ \t]*\bpage_index\b *( *\w* *)[ \t]*\bpage_index\b *( *\w* *)/d
+#s/\bcfs_page_pin\b/page_cache_get/g
+#/#[ \t]*define[ \t]*\bpage_cache_get\b *( *\w* *)[ \t]*\bpage_cache_get\b *( *\w* *)/d
+#s/\bcfs_page_unpin\b/page_cache_release/g
+#/#[ \t]*define[ \t]*\bpage_cache_release\b *( *\w* *)[ \t]*\bpage_cache_release\b *( *\w* *)/d
+#s/\bcfs_memory_pressure_get\b/memory_pressure_get/g
+#s/\bcfs_memory_pressure_set\b/memory_pressure_set/g
+#s/\bcfs_memory_pressure_clr\b/memory_pressure_clr/g
+#s/\bCFS_NUM_CACHEPAGES\b/NUM_CACHEPAGES/g
+# memory allocator
+#s/\bCFS_ALLOC_ATOMIC\b/GFP_ATOMIC/g
+#/#[ \t]*define[ \t]*\bGFP_ATOMIC\b[ \t]*\bGFP_ATOMIC\b/d
+#s/\bCFS_ALLOC_WAIT\b/__GFP_WAIT/g
+#/#[ \t]*define[ \t]*\b__GFP_WAIT\b[ \t]*\b__GFP_WAIT\b/d
+#s/\bCFS_ALLOC_ZERO\b/__GFP_ZERO/g
+#/#[ \t]*define[ \t]*\b__GFP_ZERO\b[ \t]*\b__GFP_ZERO\b/d
+#s/\bCFS_ALLOC_FS\b/__GFP_FS/g
+#/#[ \t]*define[ \t]*\b__GFP_FS\b[ \t]*\b__GFP_FS\b/d
+#s/\bCFS_ALLOC_IO\b/__GFP_IO/g
+#/#[ \t]*define[ \t]*\b__GFP_IO\b[ \t]*\b__GFP_IO\b/d
+#s/\bCFS_ALLOC_NOWARN\b/__GFP_NOWARN/g
+#/#[ \t]*define[ \t]*\b__GFP_NOWARN\b[ \t]*\b__GFP_NOWARN\b/d
+#s/\bCFS_ALLOC_STD\b/GFP_IOFS/g
+#/#[ \t]*define[ \t]*\bGFP_IOFS\b[ \t]*\bGFP_IOFS\b/d
+#s/\bCFS_ALLOC_USER\b/GFP_KERNEL/g
+#/#[ \t]*define[ \t]*\bGFP_KERNEL\b[ \t]*\bGFP_KERNEL\b/d
+#s/\bCFS_ALLOC_HIGHMEM\b/__GFP_HIGHMEM/g
+#/#[ \t]*define[ \t]*\b__GFP_HIGHMEM\b[ \t]*\b__GFP_HIGHMEM\b/d
+#s/\bCFS_ALLOC_HIGHUSER\b/GFP_HIGHUSER/g
+#/#[ \t]*define[ \t]*\bGFP_HIGHUSER\b[ \t]*\bGFP_HIGHUSER\b/d
+#s/\bCFS_ALLOC_ATOMIC_TRY\b/ALLOC_ATOMIC_TRY/g
+#s/\bcfs_alloc\b/kmalloc/g
+#/#[ \t]*define[ \t]*\bkmalloc\b *( *\w* *, *\w* *)[ \t]*\bkmalloc\b *( *\w* *, *\w* *)/d
+#s/\bcfs_free\b/kfree/g
+#/#[ \t]*define[ \t]*\bkfree\b *( *\w* *)[ \t]*\bkfree\b *( *\w* *)/d
+#s/\bcfs_alloc_large\b/vmalloc/g
+#/#[ \t]*define[ \t]*\bvmalloc\b *( *\w* *)[ \t]*\bvmalloc\b *( *\w* *)/d
+#s/\bcfs_free_large\b/vfree/g
+#/#[ \t]*define[ \t]*\bvfree\b *( *\w* *)[ \t]*\bvfree\b *( *\w* *)/d
+#s/\bcfs_alloc_page\b/alloc_page/g
+#/#[ \t]*define[ \t]*\balloc_page\b *( *\w* *)[ \t]*\balloc_page\b *( *\w* *)/d
+#s/\bcfs_free_page\b/__free_page/g
+#/#[ \t]*define[ \t]*\b__free_page\b *( *\w* *)[ \t]*\b__free_page\b *( *\w* *)/d
+# TODO: SLAB allocator
+#s/\bCFS_DECL_MMSPACE\b/DECL_MMSPACE/g
+#s/\bCFS_MMSPACE_OPEN\b/MMSPACE_OPEN/g
+#s/\bCFS_MMSPACE_CLOSE\b/MMSPACE_CLOSE/g
+#s/\bCFS_SLAB_HWCACHE_ALIGN\b/SLAB_HWCACHE_ALIGN/g
+#/#[ \t]*define[ \t]*\bSLAB_HWCACHE_ALIGN\b[ \t]*\bSLAB_HWCACHE_ALIGN\b/d
+#s/\bCFS_SLAB_KERNEL\b/SLAB_KERNEL/g
+#/#[ \t]*define[ \t]*\bSLAB_KERNEL\b[ \t]*\bSLAB_KERNEL\b/d
+#s/\bCFS_SLAB_NOFS\b/SLAB_NOFS/g
+#/#[ \t]*define[ \t]*\bSLAB_NOFS\b[ \t]*\bSLAB_NOFS\b/d
+#s/\bcfs_shrinker\b/shrinker/g
+#/#[ \t]*define[ \t]*\bshrinker\b[ \t]*\bshrinker\b/d
+#s/\bcfs_shrinker_t\b/shrinker_t/g
+#/typedef[ \t]*\bshrinker_t\b[ \t]*\bshrinker_t\b/d
+#s/\bcfs_set_shrinker\b/set_shrinker/g
+#/#[ \t]*define[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)/d
+#s/\bcfs_remove_shrinker\b/remove_shrinker/g
+#/#[ \t]*define[ \t]*\bremove_shrinker\b *( *\w* *)[ \t]*\bremove_shrinker\b *( *\w* *)/d
+#s/\bCFS_DEFAULT_SEEKS\b/DEFAULT_SEEKS/g
+#/#[ \t]*define[ \t]*\bDEFAULT_SEEKS\b[ \t]*\bDEFAULT_SEEKS\b/d
+
+
+#s/\bcfs_\b//g
+#s/\bCFS_\b//g
+#/typedef[ \t]*\b\b[ \t]*\b\b/d
+#/#[ \t]*define[ \t]*\b\b[ \t]*\b\b/d
+#/#[ \t]*define[ \t]*\b\b *( *)[ \t]*\b\b *( *)/d
+#/#[ \t]*define[ \t]*\b\b *( *\w* *)[ \t]*\b\b *( *\w* *)/d
+#/#[ \t]*define[ \t]*\b\b *( *\w* *, *\w* *)[ \t]*\b\b *( *\w* *, *\w* *)/d
+#/#[ \t]*define[ \t]*\b\b *( *\w* *, *\w* *, *\w* *)[ \t]*\b\b *( *\w* *, *\w* *, *\w* *)/d
static inline
void cfs_bitmap_set(cfs_bitmap_t *bitmap, int nbit)
{
- cfs_set_bit(nbit, bitmap->data);
+ set_bit(nbit, bitmap->data);
}
static inline
void cfs_bitmap_clear(cfs_bitmap_t *bitmap, int nbit)
{
- cfs_test_and_clear_bit(nbit, bitmap->data);
+ test_and_clear_bit(nbit, bitmap->data);
}
static inline
int cfs_bitmap_check(cfs_bitmap_t *bitmap, int nbit)
{
- return cfs_test_bit(nbit, bitmap->data);
+ return test_bit(nbit, bitmap->data);
}
static inline
int cfs_bitmap_test_and_clear(cfs_bitmap_t *bitmap, int nbit)
{
- return cfs_test_and_clear_bit(nbit, bitmap->data);
+ return test_and_clear_bit(nbit, bitmap->data);
}
/* return 0 if bitmap has no bits set */
static inline
int cfs_bitmap_check_empty(cfs_bitmap_t *bitmap)
{
- return cfs_find_first_bit(bitmap->data, bitmap->size) == bitmap->size;
+ return find_first_bit(bitmap->data, bitmap->size) == bitmap->size;
}
static inline
new->size = newsize;
}
-#define cfs_foreach_bit(bitmap, pos) \
- for((pos)=cfs_find_first_bit((bitmap)->data, bitmap->size); \
- (pos) < (bitmap)->size; \
- (pos) = cfs_find_next_bit((bitmap)->data, (bitmap)->size, (pos)+1))
+#define cfs_foreach_bit(bitmap, pos) \
+ for ((pos) = find_first_bit((bitmap)->data, bitmap->size); \
+ (pos) < (bitmap)->size; \
+ (pos) = find_next_bit((bitmap)->data, (bitmap)->size, (pos) + 1))
#endif
/*
* spin_lock (use Linux kernel's primitives)
- *
+ *
* - spin_lock_init(x)
* - spin_lock(x)
* - spin_unlock(x)
* - spin_trylock(x)
- *
+ *
* - spin_lock_irqsave(x, f)
* - spin_unlock_irqrestore(x, f)
*/
struct cfs_hash_hlist_ops;
typedef union {
- cfs_rwlock_t rw; /**< rwlock */
- cfs_spinlock_t spin; /**< spinlock */
+ rwlock_t rw; /**< rwlock */
+ spinlock_t spin; /**< spinlock */
} cfs_hash_lock_t;
/**
cfs_hash_bucket_t **hs_rehash_buckets;
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
/** serialize debug members */
- cfs_spinlock_t hs_dep_lock;
+ spinlock_t hs_dep_lock;
/** max depth */
unsigned int hs_dep_max;
/** id of the deepest bucket */
/* exclusively locked */
unsigned int pcl_locked;
/* private lock table */
- cfs_spinlock_t **pcl_locks;
+ spinlock_t **pcl_locks;
};
/* return number of private locks */
*/
#include <linux/bitops.h>
-#define cfs_test_bit(nr, addr) test_bit(nr, addr)
-#define cfs_set_bit(nr, addr) set_bit(nr, addr)
-#define cfs_clear_bit(nr, addr) clear_bit(nr, addr)
-#define cfs_test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
-#define cfs_test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
-#define cfs_find_first_bit(addr, size) find_first_bit(addr, size)
-#define cfs_find_first_zero_bit(addr, size) find_first_zero_bit(addr, size)
-#define cfs_find_next_bit(addr, size, off) find_next_bit(addr, size, off)
-#define cfs_find_next_zero_bit(addr, size, off) \
- find_next_zero_bit(addr, size, off)
-#define cfs_ffz(x) ffz(x)
-#define cfs_ffs(x) ffs(x)
-#define cfs_fls(x) fls(x)
* spinlock "implementation"
*/
-typedef spinlock_t cfs_spinlock_t;
-
-#define cfs_spin_lock_init(lock) spin_lock_init(lock)
-#define cfs_spin_lock(lock) spin_lock(lock)
-#define cfs_spin_lock_bh(lock) spin_lock_bh(lock)
-#define cfs_spin_lock_bh_init(lock) spin_lock_bh_init(lock)
-#define cfs_spin_unlock(lock) spin_unlock(lock)
-#define cfs_spin_unlock_bh(lock) spin_unlock_bh(lock)
-#define cfs_spin_trylock(lock) spin_trylock(lock)
-#define cfs_spin_is_locked(lock) spin_is_locked(lock)
-
-#define cfs_spin_lock_irq(lock) spin_lock_irq(lock)
-#define cfs_spin_unlock_irq(lock) spin_unlock_irq(lock)
-#define cfs_read_lock_irqsave(lock, f) read_lock_irqsave(lock, f)
-#define cfs_write_lock_irqsave(lock, f) write_lock_irqsave(lock, f)
-#define cfs_write_unlock_irqrestore(lock, f) write_unlock_irqrestore(lock, f)
-#define cfs_spin_lock_irqsave(lock, f) spin_lock_irqsave(lock, f)
-#define cfs_spin_unlock_irqrestore(lock, f) spin_unlock_irqrestore(lock, f)
+
+
/*
* rw_semaphore "implementation" (use Linux kernel's primitives)
* - down_write(x)
* - up_write(x)
*/
-typedef struct rw_semaphore cfs_rw_semaphore_t;
-#define cfs_init_rwsem(s) init_rwsem(s)
-#define cfs_down_read(s) down_read(s)
-#define cfs_down_read_trylock(s) down_read_trylock(s)
-#define cfs_up_read(s) up_read(s)
-#define cfs_down_write(s) down_write(s)
-#define cfs_down_write_trylock(s) down_write_trylock(s)
-#define cfs_up_write(s) up_write(s)
-#define cfs_fini_rwsem(s) do {} while(0)
+#define fini_rwsem(s) do {} while (0)
-#define CFS_DECLARE_RWSEM(name) DECLARE_RWSEM(name)
/*
* rwlock_t "implementation" (use Linux kernel's primitives)
*
* - RW_LOCK_UNLOCKED
*/
-typedef rwlock_t cfs_rwlock_t;
-
-#define cfs_rwlock_init(lock) rwlock_init(lock)
-#define cfs_read_lock(lock) read_lock(lock)
-#define cfs_read_unlock(lock) read_unlock(lock)
-#define cfs_read_unlock_irqrestore(lock,flags) \
- read_unlock_irqrestore(lock, flags)
-#define cfs_write_lock(lock) write_lock(lock)
-#define cfs_write_unlock(lock) write_unlock(lock)
-#define cfs_write_lock_bh(lock) write_lock_bh(lock)
-#define cfs_write_unlock_bh(lock) write_unlock_bh(lock)
+
#ifndef DEFINE_RWLOCK
#define DEFINE_RWLOCK(lock) rwlock_t lock = __RW_LOCK_UNLOCKED(lock)
* - wait_for_completion_interruptible(c)
* - fini_completion(c)
*/
-typedef struct completion cfs_completion_t;
-
-#define CFS_DECLARE_COMPLETION(work) DECLARE_COMPLETION(work)
-#define CFS_INIT_COMPLETION(c) INIT_COMPLETION(c)
-#define CFS_COMPLETION_INITIALIZER(work) COMPLETION_INITIALIZER(work)
-#define cfs_init_completion(c) init_completion(c)
-#define cfs_complete(c) complete(c)
-#define cfs_wait_for_completion(c) wait_for_completion(c)
-#define cfs_wait_for_completion_interruptible(c) \
- wait_for_completion_interruptible(c)
-#define cfs_complete_and_exit(c, code) complete_and_exit(c, code)
-#define cfs_fini_completion(c) do { } while (0)
+#define fini_completion(c) do { } while (0)
/*
* semaphore "implementation" (use Linux kernel's primitives)
* - down_interruptible(sem)
* - down_trylock(sem)
*/
-typedef struct semaphore cfs_semaphore_t;
-
-#ifdef DEFINE_SEMAPHORE
-#define CFS_DEFINE_SEMAPHORE(name) DEFINE_SEMAPHORE(name)
-#else
-#define CFS_DEFINE_SEMAPHORE(name) DECLARE_MUTEX(name)
-#endif
-
-#define cfs_sema_init(sem, val) sema_init(sem, val)
-#define cfs_up(x) up(x)
-#define cfs_down(x) down(x)
-#define cfs_down_interruptible(x) down_interruptible(x)
-#define cfs_down_trylock(x) down_trylock(x)
/*
* mutex "implementation" (use Linux kernel's primitives)
* - mutex_is_locked(x)
* - mutex_destroy(x)
*/
-typedef struct mutex cfs_mutex_t;
-
-#define CFS_DEFINE_MUTEX(name) DEFINE_MUTEX(name)
-
-#define cfs_mutex_init(x) mutex_init(x)
-#define cfs_mutex_lock(x) mutex_lock(x)
-#define cfs_mutex_unlock(x) mutex_unlock(x)
-#define cfs_mutex_lock_interruptible(x) mutex_lock_interruptible(x)
-#define cfs_mutex_trylock(x) mutex_trylock(x)
-#define cfs_mutex_is_locked(x) mutex_is_locked(x)
-#define cfs_mutex_destroy(x) mutex_destroy(x)
#ifndef lockdep_set_class
*
**************************************************************************/
-typedef struct cfs_lock_class_key {
- ;
-} cfs_lock_class_key_t;
+struct lock_class_key {
+ ;
+};
-#define cfs_lockdep_set_class(lock, key) \
- do { (void)sizeof (lock);(void)sizeof (key); } while (0)
-/* This has to be a macro, so that `subclass' can be undefined in kernels that
- * do not support lockdep. */
+#define lockdep_set_class(lock, key) \
+ do { (void)sizeof(lock); (void)sizeof(key); } while (0)
+/* This has to be a macro, so that `subclass' can be undefined in kernels
+ * that do not support lockdep. */
-static inline void cfs_lockdep_off(void)
+static inline void lockdep_off(void)
{
}
-static inline void cfs_lockdep_on(void)
+static inline void lockdep_on(void)
{
}
#else
-typedef struct lock_class_key cfs_lock_class_key_t;
-#define cfs_lockdep_set_class(lock, key) lockdep_set_class(lock, key)
-#define cfs_lockdep_off() lockdep_off()
-#define cfs_lockdep_on() lockdep_on()
#endif /* lockdep_set_class */
#ifndef CONFIG_DEBUG_LOCK_ALLOC
#ifndef mutex_lock_nested
-#define cfs_mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
-#else
-#define cfs_mutex_lock_nested(mutex, subclass) \
- mutex_lock_nested(mutex, subclass)
+#define mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
#endif
#ifndef spin_lock_nested
-#define cfs_spin_lock_nested(lock, subclass) spin_lock(lock)
-#else
-#define cfs_spin_lock_nested(lock, subclass) spin_lock_nested(lock, subclass)
+#define spin_lock_nested(lock, subclass) spin_lock(lock)
#endif
#ifndef down_read_nested
-#define cfs_down_read_nested(lock, subclass) down_read(lock)
-#else
-#define cfs_down_read_nested(lock, subclass) down_read_nested(lock, subclass)
+#define down_read_nested(lock, subclass) down_read(lock)
#endif
#ifndef down_write_nested
-#define cfs_down_write_nested(lock, subclass) down_write(lock)
-#else
-#define cfs_down_write_nested(lock, subclass) down_write_nested(lock, subclass)
+#define down_write_nested(lock, subclass) down_write(lock)
#endif
-#else /* CONFIG_DEBUG_LOCK_ALLOC is defined */
-#define cfs_mutex_lock_nested(mutex, subclass) \
- mutex_lock_nested(mutex, subclass)
-#define cfs_spin_lock_nested(lock, subclass) spin_lock_nested(lock, subclass)
-#define cfs_down_read_nested(lock, subclass) down_read_nested(lock, subclass)
-#define cfs_down_write_nested(lock, subclass) down_write_nested(lock, subclass)
#endif /* CONFIG_DEBUG_LOCK_ALLOC */
// XXX BUG 1511 -- remove this stanza and all callers when bug 1511 is resolved
#define SIGNAL_MASK_LOCK(task, flags) \
- spin_lock_irqsave(&task->sighand->siglock, flags)
+ spin_lock_irqsave(&task->sighand->siglock, flags)
#define SIGNAL_MASK_UNLOCK(task, flags) \
- spin_unlock_irqrestore(&task->sighand->siglock, flags)
+ spin_unlock_irqrestore(&task->sighand->siglock, flags)
#define USERMODEHELPER(path, argv, envp) \
- call_usermodehelper(path, argv, envp, 1)
+ call_usermodehelper(path, argv, envp, 1)
#define RECALC_SIGPENDING recalc_sigpending()
#define CLEAR_SIGPENDING clear_tsk_thread_flag(current, \
TIF_SIGPENDING)
};
struct upcall_cache {
- cfs_list_t uc_hashtable[UC_CACHE_HASH_SIZE];
- cfs_spinlock_t uc_lock;
- cfs_rwlock_t uc_upcall_rwlock;
-
- char uc_name[40]; /* for upcall */
- char uc_upcall[UC_CACHE_UPCALL_MAXPATH];
- int uc_acquire_expire; /* seconds */
- int uc_entry_expire; /* seconds */
- struct upcall_cache_ops *uc_ops;
+ cfs_list_t uc_hashtable[UC_CACHE_HASH_SIZE];
+ spinlock_t uc_lock;
+ rwlock_t uc_upcall_rwlock;
+
+ char uc_name[40]; /* for upcall */
+ char uc_upcall[UC_CACHE_UPCALL_MAXPATH];
+ int uc_acquire_expire; /* seconds */
+ int uc_entry_expire; /* seconds */
+ struct upcall_cache_ops *uc_ops;
};
struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
#define cfs_seq_open(file, ops, rc) (rc = seq_open(file, ops))
/* in lprocfs_stat.c, to protect the private data for proc entries */
-extern cfs_rw_semaphore_t _lprocfs_lock;
+extern struct rw_semaphore _lprocfs_lock;
/* to begin from 2.6.23, Linux defines self file_operations (proc_reg_file_ops)
* in procfs, the proc file_operation defined by Lustre (lprocfs_generic_fops)
*/
#ifndef HAVE_PROCFS_USERS
-#define LPROCFS_ENTRY() \
-do { \
- cfs_down_read(&_lprocfs_lock); \
+#define LPROCFS_ENTRY() \
+do { \
+ down_read(&_lprocfs_lock); \
} while(0)
-#define LPROCFS_EXIT() \
-do { \
- cfs_up_read(&_lprocfs_lock); \
+#define LPROCFS_EXIT() \
+do { \
+ up_read(&_lprocfs_lock); \
} while(0)
#else
static inline
int LPROCFS_ENTRY_AND_CHECK(struct proc_dir_entry *dp)
{
- int deleted = 0;
- spin_lock(&(dp)->pde_unload_lock);
- if (dp->proc_fops == NULL)
- deleted = 1;
- spin_unlock(&(dp)->pde_unload_lock);
- if (deleted)
- return -ENODEV;
- return 0;
+ int deleted = 0;
+
+ spin_lock(&(dp)->pde_unload_lock);
+ if (dp->proc_fops == NULL)
+ deleted = 1;
+ spin_unlock(&(dp)->pde_unload_lock);
+ if (deleted)
+ return -ENODEV;
+ return 0;
}
#else /* !HAVE_PROCFS_DELETED*/
static inline
up_read(&_lprocfs_lock); \
} while(0)
-#define LPROCFS_WRITE_ENTRY() \
-do { \
- cfs_down_write(&_lprocfs_lock); \
+#define LPROCFS_WRITE_ENTRY() \
+do { \
+ down_write(&_lprocfs_lock); \
} while(0)
-#define LPROCFS_WRITE_EXIT() \
-do { \
- cfs_up_write(&_lprocfs_lock); \
+#define LPROCFS_WRITE_EXIT() \
+do { \
+ up_write(&_lprocfs_lock); \
} while(0)
#else /* !LPROCFS */
size_t count;
loff_t index;
loff_t version;
- cfs_mutex_t lock;
+ struct mutex lock;
struct cfs_seq_operations *op;
void *private;
} cfs_seq_file_t;
#define __LIBCFS_USER_BITOPS_H__
/* test if bit nr is set in bitmap addr; returns previous value of bit nr */
-static __inline__ int cfs_test_and_set_bit(int nr, unsigned long *addr)
+static inline int test_and_set_bit(int nr, unsigned long *addr)
{
unsigned long mask;
return nr;
}
-#define cfs_set_bit(n, a) cfs_test_and_set_bit(n, a)
+#define set_bit(n, a) test_and_set_bit(n, a)
/* clear bit nr in bitmap addr; returns previous value of bit nr*/
-static __inline__ int cfs_test_and_clear_bit(int nr, unsigned long *addr)
+static inline int test_and_clear_bit(int nr, unsigned long *addr)
{
unsigned long mask;
return nr;
}
-#define cfs_clear_bit(n, a) cfs_test_and_clear_bit(n, a)
+#define clear_bit(n, a) test_and_clear_bit(n, a)
-static __inline__ int cfs_test_bit(int nr, const unsigned long *addr)
+static inline int test_bit(int nr, const unsigned long *addr)
{
return ((1UL << (nr & (BITS_PER_LONG - 1))) &
((addr)[nr / BITS_PER_LONG])) != 0;
#define __cfs_ffz(x) __cfs_ffs(~(x))
#define __cfs_flz(x) __cfs_fls(~(x))
-unsigned long cfs_find_next_bit(unsigned long *addr,
- unsigned long size, unsigned long offset);
+unsigned long find_next_bit(unsigned long *addr,
+ unsigned long size, unsigned long offset);
-unsigned long cfs_find_next_zero_bit(unsigned long *addr,
- unsigned long size, unsigned long offset);
+unsigned long find_next_zero_bit(unsigned long *addr,
+ unsigned long size, unsigned long offset);
-#define cfs_find_first_bit(addr,size) (cfs_find_next_bit((addr),(size),0))
-#define cfs_find_first_zero_bit(addr,size) \
- (cfs_find_next_zero_bit((addr),(size),0))
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
#endif
*/
/*
- * cfs_spin_lock
+ * spin_lock
*
- * - cfs_spin_lock_init(x)
- * - cfs_spin_lock(x)
- * - cfs_spin_unlock(x)
- * - cfs_spin_trylock(x)
- * - cfs_spin_lock_bh_init(x)
- * - cfs_spin_lock_bh(x)
- * - cfs_spin_unlock_bh(x)
+ * - spin_lock_init(x)
+ * - spin_lock(x)
+ * - spin_unlock(x)
+ * - spin_trylock(x)
+ * - spin_lock_bh_init(x)
+ * - spin_lock_bh(x)
+ * - spin_unlock_bh(x)
*
- * - cfs_spin_is_locked(x)
- * - cfs_spin_lock_irqsave(x, f)
- * - cfs_spin_unlock_irqrestore(x, f)
+ * - spin_is_locked(x)
+ * - spin_lock_irqsave(x, f)
+ * - spin_unlock_irqrestore(x, f)
*
* No-op implementation.
*/
-struct cfs_spin_lock {int foo;};
+struct spin_lock { int foo; };
-typedef struct cfs_spin_lock cfs_spinlock_t;
+typedef struct spin_lock spinlock_t;
-#define DEFINE_SPINLOCK(lock) cfs_spinlock_t lock = { }
-#define LASSERT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
-#define LINVRNT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
-#define LASSERT_SEM_LOCKED(sem) do {(void)sizeof(sem);} while(0)
-#define LASSERT_MUTEX_LOCKED(x) do {(void)sizeof(x);} while(0)
+#define DEFINE_SPINLOCK(lock) spinlock_t lock = { }
+#define LASSERT_SPIN_LOCKED(lock) do { (void)sizeof(lock); } while (0)
+#define LINVRNT_SPIN_LOCKED(lock) do { (void)sizeof(lock); } while (0)
+#define LASSERT_SEM_LOCKED(sem) do { (void)sizeof(sem); } while (0)
+#define LASSERT_MUTEX_LOCKED(x) do { (void)sizeof(x); } while (0)
-void cfs_spin_lock_init(cfs_spinlock_t *lock);
-void cfs_spin_lock(cfs_spinlock_t *lock);
-void cfs_spin_unlock(cfs_spinlock_t *lock);
-int cfs_spin_trylock(cfs_spinlock_t *lock);
-void cfs_spin_lock_bh_init(cfs_spinlock_t *lock);
-void cfs_spin_lock_bh(cfs_spinlock_t *lock);
-void cfs_spin_unlock_bh(cfs_spinlock_t *lock);
+void spin_lock_init(spinlock_t *lock);
+void spin_lock(spinlock_t *lock);
+void spin_unlock(spinlock_t *lock);
+int spin_trylock(spinlock_t *lock);
+void spin_lock_bh_init(spinlock_t *lock);
+void spin_lock_bh(spinlock_t *lock);
+void spin_unlock_bh(spinlock_t *lock);
-static inline int cfs_spin_is_locked(cfs_spinlock_t *l) {return 1;}
-static inline void cfs_spin_lock_irqsave(cfs_spinlock_t *l, unsigned long f){}
-static inline void cfs_spin_unlock_irqrestore(cfs_spinlock_t *l,
- unsigned long f){}
+static inline int spin_is_locked(spinlock_t *l) { return 1; }
+static inline void spin_lock_irqsave(spinlock_t *l, unsigned long f) {}
+static inline void spin_unlock_irqrestore(spinlock_t *l, unsigned long f) {}
/*
* Semaphore
*
- * - cfs_sema_init(x, v)
+ * - sema_init(x, v)
* - __down(x)
* - __up(x)
*/
-typedef struct cfs_semaphore {
- int foo;
-} cfs_semaphore_t;
+struct semaphore {
+ int foo;
+};
-void cfs_sema_init(cfs_semaphore_t *s, int val);
-void __up(cfs_semaphore_t *s);
-void __down(cfs_semaphore_t *s);
-int __down_interruptible(cfs_semaphore_t *s);
+void sema_init(struct semaphore *s, int val);
+void __up(struct semaphore *s);
+void __down(struct semaphore *s);
+int __down_interruptible(struct semaphore *s);
-#define CFS_DEFINE_SEMAPHORE(name) cfs_semaphore_t name = { 1 }
+#define DEFINE_SEMAPHORE(name) struct semaphore name = { 1 }
-#define cfs_up(s) __up(s)
-#define cfs_down(s) __down(s)
-#define cfs_down_interruptible(s) __down_interruptible(s)
+#define up(s) __up(s)
+#define down(s) __down(s)
+#define down_interruptible(s) __down_interruptible(s)
-static inline int cfs_down_trylock(cfs_semaphore_t *sem)
+static inline int down_trylock(struct semaphore *sem)
{
return 0;
}
/*
* Completion:
*
- * - cfs_init_completion_module(c)
- * - cfs_call_wait_handler(t)
- * - cfs_init_completion(c)
- * - cfs_complete(c)
- * - cfs_wait_for_completion(c)
- * - cfs_wait_for_completion_interruptible(c)
+ * - init_completion_module(c)
+ * - call_wait_handler(t)
+ * - init_completion(c)
+ * - complete(c)
+ * - wait_for_completion(c)
+ * - wait_for_completion_interruptible(c)
*/
-typedef struct {
- unsigned int done;
- cfs_waitq_t wait;
-} cfs_completion_t;
+struct completion {
+ unsigned int done;
+ cfs_waitq_t wait;
+};
-typedef int (*cfs_wait_handler_t) (int timeout);
-void cfs_init_completion_module(cfs_wait_handler_t handler);
-int cfs_call_wait_handler(int timeout);
-void cfs_init_completion(cfs_completion_t *c);
-void cfs_complete(cfs_completion_t *c);
-void cfs_wait_for_completion(cfs_completion_t *c);
-int cfs_wait_for_completion_interruptible(cfs_completion_t *c);
+typedef int (*wait_handler_t) (int timeout);
+void init_completion_module(wait_handler_t handler);
+int call_wait_handler(int timeout);
+void init_completion(struct completion *c);
+void complete(struct completion *c);
+void wait_for_completion(struct completion *c);
+int wait_for_completion_interruptible(struct completion *c);
-#define CFS_COMPLETION_INITIALIZER(work) \
- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+#define COMPLETION_INITIALIZER(work) \
+ { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
-#define CFS_DECLARE_COMPLETION(work) \
- cfs_completion_t work = CFS_COMPLETION_INITIALIZER(work)
-#define CFS_INIT_COMPLETION(x) ((x).done = 0)
+#define INIT_COMPLETION(x) ((x).done = 0)
/*
- * cfs_rw_semaphore:
+ * rw_semaphore:
*
- * - cfs_init_rwsem(x)
- * - cfs_down_read(x)
- * - cfs_down_read_trylock(x)
- * - cfs_down_write(struct cfs_rw_semaphore *s);
- * - cfs_down_write_trylock(struct cfs_rw_semaphore *s);
- * - cfs_up_read(x)
- * - cfs_up_write(x)
- * - cfs_fini_rwsem(x)
+ * - init_rwsem(x)
+ * - down_read(x)
+ * - down_read_trylock(x)
+ * - down_write(struct rw_semaphore *s);
+ * - down_write_trylock(struct rw_semaphore *s);
+ * - up_read(x)
+ * - up_write(x)
+ * - fini_rwsem(x)
*/
-typedef struct cfs_rw_semaphore {
- int foo;
-} cfs_rw_semaphore_t;
-
-void cfs_init_rwsem(cfs_rw_semaphore_t *s);
-void cfs_down_read(cfs_rw_semaphore_t *s);
-int cfs_down_read_trylock(cfs_rw_semaphore_t *s);
-void cfs_down_write(cfs_rw_semaphore_t *s);
-int cfs_down_write_trylock(cfs_rw_semaphore_t *s);
-void cfs_up_read(cfs_rw_semaphore_t *s);
-void cfs_up_write(cfs_rw_semaphore_t *s);
-void cfs_fini_rwsem(cfs_rw_semaphore_t *s);
-#define CFS_DECLARE_RWSEM(name) cfs_rw_semaphore_t name = { }
+struct rw_semaphore {
+ int foo;
+};
+
+void init_rwsem(struct rw_semaphore *s);
+void down_read(struct rw_semaphore *s);
+int down_read_trylock(struct rw_semaphore *s);
+void down_write(struct rw_semaphore *s);
+int down_write_trylock(struct rw_semaphore *s);
+void up_read(struct rw_semaphore *s);
+void up_write(struct rw_semaphore *s);
+void fini_rwsem(struct rw_semaphore *s);
+#define DECLARE_RWSEM(name) struct rw_semaphore name = { }
/*
* read-write lock : Need to be investigated more!!
* XXX nikita: for now, let rwlock_t to be identical to rw_semaphore
*
- * - cfs_rwlock_init(x)
- * - cfs_read_lock(x)
- * - cfs_read_unlock(x)
- * - cfs_write_lock(x)
- * - cfs_write_unlock(x)
- * - cfs_write_lock_irqsave(x)
- * - cfs_write_unlock_irqrestore(x)
- * - cfs_read_lock_irqsave(x)
- * - cfs_read_unlock_irqrestore(x)
+ * - rwlock_init(x)
+ * - read_lock(x)
+ * - read_unlock(x)
+ * - write_lock(x)
+ * - write_unlock(x)
+ * - write_lock_irqsave(x)
+ * - write_unlock_irqrestore(x)
+ * - read_lock_irqsave(x)
+ * - read_unlock_irqrestore(x)
*/
-typedef cfs_rw_semaphore_t cfs_rwlock_t;
-#define DEFINE_RWLOCK(lock) cfs_rwlock_t lock = { }
+#define rwlock_t struct rw_semaphore
+#define DEFINE_RWLOCK(lock) rwlock_t lock = { }
+
+#define rwlock_init(pl) init_rwsem(pl)
-#define cfs_rwlock_init(pl) cfs_init_rwsem(pl)
+#define read_lock(l) down_read(l)
+#define read_unlock(l) up_read(l)
+#define write_lock(l) down_write(l)
+#define write_unlock(l) up_write(l)
+
+static inline void write_lock_irqsave(rwlock_t *l, unsigned long f)
+{
+ write_lock(l);
+}
-#define cfs_read_lock(l) cfs_down_read(l)
-#define cfs_read_unlock(l) cfs_up_read(l)
-#define cfs_write_lock(l) cfs_down_write(l)
-#define cfs_write_unlock(l) cfs_up_write(l)
+static inline void write_unlock_irqrestore(rwlock_t *l, unsigned long f)
+{
+ write_unlock(l);
+}
-static inline void
-cfs_write_lock_irqsave(cfs_rwlock_t *l, unsigned long f) { cfs_write_lock(l); }
-static inline void
-cfs_write_unlock_irqrestore(cfs_rwlock_t *l, unsigned long f) { cfs_write_unlock(l); }
+static inline void read_lock_irqsave(rwlock_t *l, unsigned long f)
+{
+ read_lock(l);
+}
-static inline void
-cfs_read_lock_irqsave(cfs_rwlock_t *l, unsigned long f) { cfs_read_lock(l); }
-static inline void
-cfs_read_unlock_irqrestore(cfs_rwlock_t *l, unsigned long f) { cfs_read_unlock(l); }
+static inline void read_unlock_irqrestore(rwlock_t *l, unsigned long f)
+{
+ read_unlock(l);
+}
/*
* Atomic for single-threaded user-space
int c_done;
pthread_cond_t c_cond;
pthread_mutex_t c_mut;
-} cfs_mt_completion_t;
+} mt_completion_t;
-void cfs_mt_init_completion(cfs_mt_completion_t *c);
-void cfs_mt_fini_completion(cfs_mt_completion_t *c);
-void cfs_mt_complete(cfs_mt_completion_t *c);
-void cfs_mt_wait_for_completion(cfs_mt_completion_t *c);
+void mt_init_completion(mt_completion_t *c);
+void mt_fini_completion(mt_completion_t *c);
+void mt_complete(mt_completion_t *c);
+void mt_wait_for_completion(mt_completion_t *c);
/*
* Multi-threaded user space atomic APIs
*/
-typedef struct { volatile int counter; } cfs_mt_atomic_t;
+typedef struct { volatile int counter; } mt_atomic_t;
-int cfs_mt_atomic_read(cfs_mt_atomic_t *a);
-void cfs_mt_atomic_set(cfs_mt_atomic_t *a, int b);
-int cfs_mt_atomic_dec_and_test(cfs_mt_atomic_t *a);
-void cfs_mt_atomic_inc(cfs_mt_atomic_t *a);
-void cfs_mt_atomic_dec(cfs_mt_atomic_t *a);
-void cfs_mt_atomic_add(int b, cfs_mt_atomic_t *a);
-void cfs_mt_atomic_sub(int b, cfs_mt_atomic_t *a);
+int mt_atomic_read(mt_atomic_t *a);
+void mt_atomic_set(mt_atomic_t *a, int b);
+int mt_atomic_dec_and_test(mt_atomic_t *a);
+void mt_atomic_inc(mt_atomic_t *a);
+void mt_atomic_dec(mt_atomic_t *a);
+void mt_atomic_add(int b, mt_atomic_t *a);
+void mt_atomic_sub(int b, mt_atomic_t *a);
#endif /* HAVE_LIBPTHREAD */
* Mutex interface.
*
**************************************************************************/
-typedef struct cfs_semaphore cfs_mutex_t;
+#define mutex semaphore
-#define CFS_DEFINE_MUTEX(m) CFS_DEFINE_SEMAPHORE(m)
+#define DEFINE_MUTEX(m) DEFINE_SEMAPHORE(m)
-static inline void cfs_mutex_init(cfs_mutex_t *mutex)
+static inline void mutex_init(struct mutex *mutex)
{
- cfs_sema_init(mutex, 1);
+ sema_init(mutex, 1);
}
-static inline void cfs_mutex_lock(cfs_mutex_t *mutex)
+static inline void mutex_lock(struct mutex *mutex)
{
- cfs_down(mutex);
+ down(mutex);
}
-static inline void cfs_mutex_unlock(cfs_mutex_t *mutex)
+static inline void mutex_unlock(struct mutex *mutex)
{
- cfs_up(mutex);
+ up(mutex);
}
-static inline int cfs_mutex_lock_interruptible(cfs_mutex_t *mutex)
+static inline int mutex_lock_interruptible(struct mutex *mutex)
{
- return cfs_down_interruptible(mutex);
+ return down_interruptible(mutex);
}
/**
* \retval 1 try-lock succeeded (lock acquired).
* \retval 0 indicates lock contention.
*/
-static inline int cfs_mutex_trylock(cfs_mutex_t *mutex)
+static inline int mutex_trylock(struct mutex *mutex)
{
- return !cfs_down_trylock(mutex);
+ return !down_trylock(mutex);
}
-static inline void cfs_mutex_destroy(cfs_mutex_t *lock)
+static inline void mutex_destroy(struct mutex *lock)
{
}
*
* \retval 0 mutex is not locked. This should never happen.
*/
-static inline int cfs_mutex_is_locked(cfs_mutex_t *lock)
+static inline int mutex_is_locked(struct mutex *lock)
{
return 1;
}
*
**************************************************************************/
-typedef struct cfs_lock_class_key {
+struct lock_class_key {
int foo;
-} cfs_lock_class_key_t;
+};
-static inline void cfs_lockdep_set_class(void *lock,
- cfs_lock_class_key_t *key)
+static inline void lockdep_set_class(void *lock, struct lock_class_key *key)
{
}
-static inline void cfs_lockdep_off(void)
+static inline void lockdep_off(void)
{
}
-static inline void cfs_lockdep_on(void)
+static inline void lockdep_on(void)
{
}
-#define cfs_mutex_lock_nested(mutex, subclass) cfs_mutex_lock(mutex)
-#define cfs_spin_lock_nested(lock, subclass) cfs_spin_lock(lock)
-#define cfs_down_read_nested(lock, subclass) cfs_down_read(lock)
-#define cfs_down_write_nested(lock, subclass) cfs_down_write(lock)
+#define mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
+#define spin_lock_nested(lock, subclass) spin_lock(lock)
+#define down_read_nested(lock, subclass) down_read(lock)
+#define down_write_nested(lock, subclass) down_write(lock)
/* !__KERNEL__ */
unsigned long simple_strtoul(const char *cp,char **endp, unsigned int base);
-static inline int cfs_set_bit(int nr, void * addr)
+static inline int set_bit(int nr, void * addr)
{
(((volatile ULONG *) addr)[nr >> 5]) |= (1UL << (nr & 31));
return *((int *) addr);
}
-static inline int cfs_test_bit(int nr, void * addr)
+static inline int test_bit(int nr, void * addr)
{
return (int)(((1UL << (nr & 31)) & (((volatile ULONG *) addr)[nr >> 5])) != 0);
}
-static inline int cfs_clear_bit(int nr, void * addr)
+static inline int clear_bit(int nr, void * addr)
{
(((volatile ULONG *) addr)[nr >> 5]) &= (~(1UL << (nr & 31)));
return *((int *) addr);
}
-static inline int cfs_test_and_set_bit(int nr, volatile void *addr)
+static inline int test_and_set_bit(int nr, volatile void *addr)
{
int rc;
unsigned char mask;
return rc;
}
-#define ext2_set_bit(nr,addr) (cfs_set_bit(nr, addr), 0)
-#define ext2_clear_bit(nr,addr) (cfs_clear_bit(nr, addr), 0)
-#define ext2_test_bit(nr,addr) cfs_test_bit(nr, addr)
+#define ext2_set_bit(nr, addr) (set_bit(nr, addr), 0)
+#define ext2_clear_bit(nr, addr) (clear_bit(nr, addr), 0)
+#define ext2_test_bit(nr, addr) test_bit(nr, addr)
-static inline int cfs_ffs(int x)
+static inline int ffs(int x)
{
int r = 1;
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
static inline
-int cfs_fls(int x)
+int fls(int x)
{
int r = 32;
return r;
}
-static inline unsigned cfs_find_first_bit(const unsigned long *addr,
+static inline unsigned find_first_bit(const unsigned long *addr,
unsigned size)
{
unsigned x = 0;
int i_uid;
int i_gid;
__u32 i_flags;
- cfs_mutex_t i_sem;
+ struct mutex i_sem;
void * i_priv;
};
* spinlock & event definitions
*/
-typedef struct cfs_spin_lock cfs_spinlock_t;
+typedef struct spin_lock spinlock_t;
/* atomic */
#define cfs_atomic_inc_return(v) cfs_atomic_add_return(1, v)
#define cfs_atomic_dec_return(v) cfs_atomic_sub_return(1, v)
-int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, cfs_spinlock_t *lock);
+int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, spinlock_t *lock);
/* event */
*
*/
-struct cfs_spin_lock {
- KSPIN_LOCK lock;
- KIRQL irql;
+struct spin_lock {
+ KSPIN_LOCK lock;
+ KIRQL irql;
};
-#define CFS_DECL_SPIN(name) cfs_spinlock_t name;
-#define CFS_DECL_SPIN_EXTERN(name) extern cfs_spinlock_t name;
+#define CFS_DECL_SPIN(name) spinlock_t name;
+#define CFS_DECL_SPIN_EXTERN(name) extern spinlock_t name;
#define DEFINE_SPINLOCK {0}
-static inline void cfs_spin_lock_init(cfs_spinlock_t *lock)
+static inline void spin_lock_init(spinlock_t *lock)
{
- KeInitializeSpinLock(&(lock->lock));
+ KeInitializeSpinLock(&(lock->lock));
}
-static inline void cfs_spin_lock(cfs_spinlock_t *lock)
+static inline void spin_lock(spinlock_t *lock)
{
- KeAcquireSpinLock(&(lock->lock), &(lock->irql));
+ KeAcquireSpinLock(&(lock->lock), &(lock->irql));
}
-static inline void cfs_spin_lock_nested(cfs_spinlock_t *lock, unsigned subclass)
+static inline void spin_lock_nested(spinlock_t *lock, unsigned subclass)
{
- KeAcquireSpinLock(&(lock->lock), &(lock->irql));
+ KeAcquireSpinLock(&(lock->lock), &(lock->irql));
}
-static inline void cfs_spin_unlock(cfs_spinlock_t *lock)
+static inline void spin_unlock(spinlock_t *lock)
{
- KIRQL irql = lock->irql;
- KeReleaseSpinLock(&(lock->lock), irql);
+ KIRQL irql = lock->irql;
+ KeReleaseSpinLock(&(lock->lock), irql);
}
-#define cfs_spin_lock_irqsave(lock, flags) \
-do {(flags) = 0; cfs_spin_lock(lock);} while(0)
+#define spin_lock_irqsave(lock, flags) \
+ do { (flags) = 0; spin_lock(lock); } while (0)
-#define cfs_spin_unlock_irqrestore(lock, flags) \
-do {cfs_spin_unlock(lock);} while(0)
+#define spin_unlock_irqrestore(lock, flags) \
+ do { spin_unlock(lock); } while (0)
/* There's no corresponding routine in windows kernel.
extern int libcfs_mp_system;
-static int cfs_spin_trylock(cfs_spinlock_t *lock)
+static int spin_trylock(spinlock_t *lock)
{
- KIRQL Irql;
- int rc = 0;
+ KIRQL Irql;
+ int rc = 0;
- ASSERT(lock != NULL);
+ ASSERT(lock != NULL);
- KeRaiseIrql(DISPATCH_LEVEL, &Irql);
+ KeRaiseIrql(DISPATCH_LEVEL, &Irql);
- if (libcfs_mp_system) {
- if (0 == (ulong_ptr_t)lock->lock) {
+ if (libcfs_mp_system) {
+ if (0 == (ulong_ptr_t)lock->lock) {
#if _X86_
- __asm {
- mov edx, dword ptr [ebp + 8]
- lock bts dword ptr[edx], 0
- jb lock_failed
- mov rc, TRUE
- lock_failed:
- }
+ __asm {
+ mov edx, dword ptr [ebp + 8]
+ lock bts dword ptr[edx], 0
+ jb lock_failed
+ mov rc, TRUE
+ lock_failed:
+ }
#else
- KdBreakPoint();
+ KdBreakPoint();
#endif
- }
- } else {
- rc = TRUE;
- }
+ }
+ } else {
+ rc = TRUE;
+ }
- if (rc) {
- lock->irql = Irql;
- } else {
- KeLowerIrql(Irql);
- }
+ if (rc) {
+ lock->irql = Irql;
+ } else {
+ KeLowerIrql(Irql);
+ }
- return rc;
+ return rc;
}
-static int cfs_spin_is_locked(cfs_spinlock_t *lock)
+static int spin_is_locked(spinlock_t *lock)
{
#if _WIN32_WINNT >= 0x502
- /* KeTestSpinLock only avalilable on 2k3 server or later */
- return (!KeTestSpinLock(&lock->lock));
+ /* KeTestSpinLock only available on 2k3 server or later */
+ return !KeTestSpinLock(&lock->lock);
#else
- return (int) (lock->lock);
+ return (int) (lock->lock);
#endif
}
/* synchronization between cpus: it will disable all DPCs
kernel task scheduler on the CPU */
-#define cfs_spin_lock_bh(x) cfs_spin_lock(x)
-#define cfs_spin_unlock_bh(x) cfs_spin_unlock(x)
-#define cfs_spin_lock_bh_init(x) cfs_spin_lock_init(x)
+#define spin_lock_bh(x) spin_lock(x)
+#define spin_unlock_bh(x) spin_unlock(x)
+#define spin_lock_bh_init(x) spin_lock_init(x)
/*
- * cfs_rw_semaphore (using ERESOURCE)
+ * rw_semaphore (using ERESOURCE)
*/
-typedef struct cfs_rw_semaphore {
- ERESOURCE rwsem;
-} cfs_rw_semaphore_t;
+struct rw_semaphore {
+ ERESOURCE rwsem;
+};
-#define CFS_DECLARE_RWSEM(name) cfs_rw_semaphore_t name
-#define CFS_DECLARE_RWSEM_EXTERN(name) extern cfs_rw_semaphore_t name
+#define DECLARE_RWSEM(name) struct rw_semaphore name
+#define CFS_DECLARE_RWSEM_EXTERN(name) extern struct rw_semaphore name
/*
- * cfs_init_rwsem
- * To initialize the the cfs_rw_semaphore_t structure
+ * init_rwsem
+ *   To initialize the rw_semaphore structure
*
* Arguments:
- * rwsem: pointer to the cfs_rw_semaphore_t structure
+ * rwsem: pointer to the rw_semaphore structure
*
* Return Value:
* N/A
* N/A
*/
-static inline void cfs_init_rwsem(cfs_rw_semaphore_t *s)
+static inline void init_rwsem(struct rw_semaphore *s)
{
ExInitializeResourceLite(&s->rwsem);
}
-#define rwsem_init cfs_init_rwsem
+#define rwsem_init init_rwsem
/*
- * cfs_fini_rwsem
- * To finilize/destroy the the cfs_rw_semaphore_t structure
+ * fini_rwsem
+ *   To finalize/destroy the rw_semaphore structure
*
* Arguments:
- * rwsem: pointer to the cfs_rw_semaphore_t structure
+ * rwsem: pointer to the rw_semaphore structure
*
* Return Value:
* N/A
* Just define it NULL for other systems.
*/
-static inline void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
+static inline void fini_rwsem(struct rw_semaphore *s)
{
- ExDeleteResourceLite(&s->rwsem);
+ ExDeleteResourceLite(&s->rwsem);
}
/*
- * cfs_down_read
- * To acquire read-lock of the cfs_rw_semaphore
+ * down_read
+ * To acquire read-lock of the rw_semaphore
*
* Arguments:
- * rwsem: pointer to the cfs_rw_semaphore_t structure
+ * rwsem: pointer to the struct rw_semaphore
*
* Return Value:
* N/A
* N/A
*/
-static inline void cfs_down_read(cfs_rw_semaphore_t *s)
+static inline void down_read(struct rw_semaphore *s)
{
ExAcquireResourceSharedLite(&s->rwsem, TRUE);
}
-#define cfs_down_read_nested cfs_down_read
+#define down_read_nested down_read
/*
- * cfs_down_read_trylock
- * To acquire read-lock of the cfs_rw_semaphore without blocking
+ * down_read_trylock
+ * To acquire read-lock of the rw_semaphore without blocking
*
* Arguments:
- * rwsem: pointer to the cfs_rw_semaphore_t structure
+ * rwsem: pointer to the struct rw_semaphore
*
* Return Value:
* Zero: failed to acquire the read lock
* This routine will return immediately without waiting.
*/
-static inline int cfs_down_read_trylock(cfs_rw_semaphore_t *s)
+static inline int down_read_trylock(struct rw_semaphore *s)
{
return ExAcquireResourceSharedLite(&s->rwsem, FALSE);
}
/*
- * cfs_down_write
- * To acquire write-lock of the cfs_rw_semaphore
+ * down_write
+ * To acquire write-lock of the struct rw_semaphore
*
* Arguments:
- * rwsem: pointer to the cfs_rw_semaphore_t structure
+ * rwsem: pointer to the struct rw_semaphore
*
* Return Value:
* N/A
* N/A
*/
-static inline void cfs_down_write(cfs_rw_semaphore_t *s)
+static inline void down_write(struct rw_semaphore *s)
{
ExAcquireResourceExclusiveLite(&(s->rwsem), TRUE);
}
-#define cfs_down_write_nested cfs_down_write
+#define down_write_nested down_write
/*
* down_write_trylock
- * To acquire write-lock of the cfs_rw_semaphore without blocking
+ * To acquire write-lock of the rw_semaphore without blocking
*
* Arguments:
- * rwsem: pointer to the cfs_rw_semaphore_t structure
+ * rwsem: pointer to the struct rw_semaphore
*
* Return Value:
* Zero: failed to acquire the write lock
* This routine will return immediately without waiting.
*/
-static inline int cfs_down_write_trylock(cfs_rw_semaphore_t *s)
+static inline int down_write_trylock(struct rw_semaphore *s)
{
- return ExAcquireResourceExclusiveLite(&(s->rwsem), FALSE);
+ return ExAcquireResourceExclusiveLite(&(s->rwsem), FALSE);
}
/*
- * cfs_up_read
- * To release read-lock of the cfs_rw_semaphore
+ * up_read
+ * To release read-lock of the rw_semaphore
*
* Arguments:
- * rwsem: pointer to the cfs_rw_semaphore_t structure
+ * rwsem: pointer to the struct rw_semaphore
*
* Return Value:
* N/A
* N/A
*/
-static inline void cfs_up_read(cfs_rw_semaphore_t *s)
+static inline void up_read(struct rw_semaphore *s)
{
- ExReleaseResourceForThreadLite(
- &(s->rwsem),
- ExGetCurrentResourceThread());
+ ExReleaseResourceForThreadLite(&(s->rwsem),
+ ExGetCurrentResourceThread());
}
/*
- * cfs_up_write
- * To release write-lock of the cfs_rw_semaphore
+ * up_write
+ * To release write-lock of the rw_semaphore
*
* Arguments:
- * rwsem: pointer to the cfs_rw_semaphore_t structure
+ * rwsem: pointer to the struct rw_semaphore
*
* Return Value:
* N/A
* N/A
*/
-static inline void cfs_up_write(cfs_rw_semaphore_t *s)
+static inline void up_write(struct rw_semaphore *s)
{
- ExReleaseResourceForThreadLite(
- &(s->rwsem),
- ExGetCurrentResourceThread());
+ ExReleaseResourceForThreadLite(&(s->rwsem),
+ ExGetCurrentResourceThread());
}
/*
*/
typedef struct {
- cfs_spinlock_t guard;
- int count;
-} cfs_rwlock_t;
+ spinlock_t guard;
+ int count;
+} rwlock_t;
-void cfs_rwlock_init(cfs_rwlock_t * rwlock);
-void cfs_rwlock_fini(cfs_rwlock_t * rwlock);
+void rwlock_init(rwlock_t *rwlock);
+void cfs_rwlock_fini(rwlock_t *rwlock);
-void cfs_read_lock(cfs_rwlock_t * rwlock);
-void cfs_read_unlock(cfs_rwlock_t * rwlock);
-void cfs_write_lock(cfs_rwlock_t * rwlock);
-void cfs_write_unlock(cfs_rwlock_t * rwlock);
+void read_lock(rwlock_t *rwlock);
+void read_unlock(rwlock_t *rwlock);
+void write_lock(rwlock_t *rwlock);
+void write_unlock(rwlock_t *rwlock);
-#define cfs_write_lock_irqsave(l, f) do {f = 0; cfs_write_lock(l);} while(0)
-#define cfs_write_unlock_irqrestore(l, f) do {cfs_write_unlock(l);} while(0)
-#define cfs_read_lock_irqsave(l, f do {f=0; cfs_read_lock(l);} while(0)
-#define cfs_read_unlock_irqrestore(l, f) do {cfs_read_unlock(l);} while(0)
+#define write_lock_irqsave(l, f) do { f = 0; write_lock(l); } while (0)
+#define write_unlock_irqrestore(l, f) do { write_unlock(l); } while (0)
+#define read_lock_irqsave(l, f) do { f = 0; read_lock(l); } while (0)
+#define read_unlock_irqrestore(l, f) do { read_unlock(l); } while (0)
-#define cfs_write_lock_bh cfs_write_lock
-#define cfs_write_unlock_bh cfs_write_unlock
+#define write_lock_bh write_lock
+#define write_unlock_bh write_unlock
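/*
 * Usage sketch (hypothetical caller): in this emulation the *_irqsave
 * variants only zero the flags argument and fall through to the plain
 * lock calls, and the *_bh variants are straight aliases.
 */
static inline void example_rwlock_usage(rwlock_t *lock)
{
	unsigned long flags;

	rwlock_init(lock);

	read_lock_irqsave(lock, flags);         /* flags = 0; read_lock() */
	/* ... reader ... */
	read_unlock_irqrestore(lock, flags);

	write_lock_bh(lock);                    /* alias for write_lock() */
	/* ... writer ... */
	write_unlock_bh(lock);
}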
-typedef struct cfs_lock_class_key {
- int foo;
-} cfs_lock_class_key_t;
+struct lock_class_key {
+ int foo;
+};
-#define cfs_lockdep_set_class(lock, class) do {} while(0)
+#define lockdep_set_class(lock, class) do {} while (0)
-static inline void cfs_lockdep_off(void)
+static inline void lockdep_off(void)
{
}
-static inline void cfs_lockdep_on(void)
+static inline void lockdep_on(void)
{
}
* - __up(x)
*/
-typedef struct cfs_semaphore {
+struct semaphore {
KSEMAPHORE sem;
-} cfs_semaphore_t;
+};
-static inline void cfs_sema_init(cfs_semaphore_t *s, int val)
+static inline void sema_init(struct semaphore *s, int val)
{
KeInitializeSemaphore(&s->sem, val, val);
}
-static inline void __down(cfs_semaphore_t *s)
+static inline void __down(struct semaphore *s)
{
- KeWaitForSingleObject( &(s->sem), Executive,
- KernelMode, FALSE, NULL );
+ KeWaitForSingleObject(&(s->sem), Executive, KernelMode, FALSE, NULL);
}
-static inline void __up(cfs_semaphore_t *s)
+static inline void __up(struct semaphore *s)
{
KeReleaseSemaphore(&s->sem, 0, 1, FALSE);
}
-static inline int down_trylock(cfs_semaphore_t *s)
+static inline int down_trylock(struct semaphore *s)
{
- LARGE_INTEGER timeout = {0};
- NTSTATUS status =
- KeWaitForSingleObject( &(s->sem), Executive,
- KernelMode, FALSE, &timeout);
+ LARGE_INTEGER timeout = {0};
+ NTSTATUS status = KeWaitForSingleObject(&(s->sem), Executive,
+ KernelMode, FALSE, &timeout);
- if (status == STATUS_SUCCESS) {
- return 0;
- }
+ if (status == STATUS_SUCCESS)
+ return 0;
- return 1;
+ return 1;
}
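/*
 * Usage sketch (hypothetical caller): the KSEMAPHORE-backed semaphore keeps
 * the Linux convention that down_trylock() returns 0 on success.
 */
static inline void example_sem_usage(struct semaphore *s)
{
	sema_init(s, 1);                /* binary semaphore            */
	if (down_trylock(s) == 0) {     /* 0: acquired, 1: unavailable */
		/* ... critical section ... */
		__up(s);                /* KeReleaseSemaphore()        */
	}
}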
/*
* - mutex_down(x)
*/
-typedef struct cfs_semaphore cfs_mutex_t;
+#define mutex semaphore
-#define CFS_DECLARE_MUTEX(x) cfs_mutex_t x
+#define CFS_DECLARE_MUTEX(x) struct mutex x
/*
* init_mutex
* Notes:
* N/A
*/
-#define cfs_mutex_init cfs_init_mutex
-static inline void cfs_init_mutex(cfs_mutex_t *mutex)
+#define mutex_init cfs_init_mutex
+static inline void cfs_init_mutex(struct mutex *mutex)
{
- cfs_sema_init(mutex, 1);
+ sema_init(mutex, 1);
}
/*
* N/A
*/
-static inline void cfs_mutex_down(cfs_mutex_t *mutex)
+static inline void cfs_mutex_down(struct mutex *mutex)
{
- __down(mutex);
+ __down(mutex);
}
-static inline int cfs_mutex_down_interruptible(cfs_mutex_t *mutex)
+static inline int cfs_mutex_down_interruptible(struct mutex *mutex)
{
- __down(mutex);
- return 0;
+ __down(mutex);
+ return 0;
}
-#define cfs_mutex_lock(m) cfs_mutex_down(m)
-#define cfs_mutex_trylock(s) down_trylock(s)
-#define cfs_mutex_lock_nested(m) cfs_mutex_down(m)
-#define cfs_down(m) cfs_mutex_down(m)
-#define cfs_down_interruptible(m) cfs_mutex_down_interruptible(m)
+#define mutex_lock(m) cfs_mutex_down(m)
+#define mutex_trylock(s) down_trylock(s)
+#define mutex_lock_nested(m) cfs_mutex_down(m)
+#define down(m) cfs_mutex_down(m)
+#define down_interruptible(m) cfs_mutex_down_interruptible(m)
/*
* mutex_up
* N/A
*/
-static inline void cfs_mutex_up(cfs_mutex_t *mutex)
+static inline void cfs_mutex_up(struct mutex *mutex)
{
- __up(mutex);
+ __up(mutex);
}
-#define cfs_mutex_unlock(m) cfs_mutex_up(m)
-#define cfs_up(m) cfs_mutex_up(m)
+#define mutex_unlock(m) cfs_mutex_up(m)
+#define up(m) cfs_mutex_up(m)
/*
* init_mutex_locked
* N/A
*/
-static inline void cfs_init_mutex_locked(cfs_mutex_t *mutex)
+static inline void cfs_init_mutex_locked(struct mutex *mutex)
{
- cfs_init_mutex(mutex);
- cfs_mutex_down(mutex);
+ cfs_init_mutex(mutex);
+ cfs_mutex_down(mutex);
}
-static inline void cfs_mutex_destroy(cfs_mutex_t *mutex)
+static inline void mutex_destroy(struct mutex *mutex)
{
}
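/*
 * Usage sketch (hypothetical caller): after the rename the mutex_* names
 * resolve to the semaphore-backed helpers above, so existing call sites
 * keep the Linux spelling.
 */
static inline void example_mutex_usage(struct mutex *m)
{
	mutex_init(m);          /* cfs_init_mutex() -> sema_init(m, 1) */
	mutex_lock(m);          /* cfs_mutex_down() -> __down()        */
	/* ... critical section ... */
	mutex_unlock(m);        /* cfs_mutex_up()   -> __up()          */
	mutex_destroy(m);       /* no-op in this emulation             */
}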
* - wait_for_completion(c)
*/
-typedef struct {
+struct completion {
event_t event;
-} cfs_completion_t;
+};
/*
* N/A
*/
-static inline void cfs_init_completion(cfs_completion_t *c)
+static inline void init_completion(struct completion *c)
{
cfs_init_event(&(c->event), 1, FALSE);
}
* N/A
*/
-static inline void cfs_complete(cfs_completion_t *c)
+static inline void complete(struct completion *c)
{
cfs_wake_event(&(c->event));
}
* N/A
*/
-static inline void cfs_wait_for_completion(cfs_completion_t *c)
+static inline void wait_for_completion(struct completion *c)
{
- cfs_wait_event_internal(&(c->event), 0);
+ cfs_wait_event_internal(&(c->event), 0);
}
-static inline int cfs_wait_for_completion_interruptible(cfs_completion_t *c)
+static inline int wait_for_completion_interruptible(struct completion *c)
{
- cfs_wait_event_internal(&(c->event), 0);
- return 0;
+ cfs_wait_event_internal(&(c->event), 0);
+ return 0;
}
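/*
 * Usage sketch (hypothetical caller): the event-backed completion keeps the
 * Linux names; complete() is normally issued from another thread.
 */
static inline void example_completion_usage(struct completion *c)
{
	init_completion(c);             /* one-shot event                   */
	/* another thread eventually calls complete(c) */
	wait_for_completion(c);         /* blocks on the embedded event     */
}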
-#else /* !__KERNEL__ */
#endif /* !__KERNEL__ */
#endif
/* Make it prettier to test the above... */
#define UnlockPage(page) unlock_page(page)
-#define Page_Uptodate(page) cfs_test_bit(PG_uptodate, &(page)->flags)
-#define SetPageUptodate(page) \
+#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
+#define SetPageUptodate(page) \
do { \
arch_set_page_uptodate(page); \
- cfs_set_bit(PG_uptodate, &(page)->flags); \
+ set_bit(PG_uptodate, &(page)->flags); \
} while (0)
-#define ClearPageUptodate(page) cfs_clear_bit(PG_uptodate, &(page)->flags)
-#define PageDirty(page) cfs_test_bit(PG_dirty, &(page)->flags)
-#define SetPageDirty(page) cfs_set_bit(PG_dirty, &(page)->flags)
-#define ClearPageDirty(page) cfs_clear_bit(PG_dirty, &(page)->flags)
-#define PageLocked(page) cfs_test_bit(PG_locked, &(page)->flags)
-#define LockPage(page) cfs_set_bit(PG_locked, &(page)->flags)
-#define TryLockPage(page) cfs_test_and_set_bit(PG_locked, &(page)->flags)
-#define PageChecked(page) cfs_test_bit(PG_checked, &(page)->flags)
-#define SetPageChecked(page) cfs_set_bit(PG_checked, &(page)->flags)
-#define ClearPageChecked(page) cfs_clear_bit(PG_checked, &(page)->flags)
-#define PageLaunder(page) cfs_test_bit(PG_launder, &(page)->flags)
-#define SetPageLaunder(page) cfs_set_bit(PG_launder, &(page)->flags)
-#define ClearPageLaunder(page) cfs_clear_bit(PG_launder, &(page)->flags)
-#define ClearPageArch1(page) cfs_clear_bit(PG_arch_1, &(page)->flags)
-
-#define PageError(page) cfs_test_bit(PG_error, &(page)->flags)
-#define SetPageError(page) cfs_set_bit(PG_error, &(page)->flags)
-#define ClearPageError(page) cfs_clear_bit(PG_error, &(page)->flags)
-#define PageReferenced(page) cfs_test_bit(PG_referenced, &(page)->flags)
-#define SetPageReferenced(page) cfs_set_bit(PG_referenced, &(page)->flags)
-#define ClearPageReferenced(page) cfs_clear_bit(PG_referenced, &(page)->flags)
-
-#define PageActive(page) cfs_test_bit(PG_active, &(page)->flags)
-#define SetPageActive(page) cfs_set_bit(PG_active, &(page)->flags)
-#define ClearPageActive(page) cfs_clear_bit(PG_active, &(page)->flags)
-
-#define PageWriteback(page) cfs_test_bit(PG_writeback, &(page)->flags)
-#define TestSetPageWriteback(page) cfs_test_and_set_bit(PG_writeback, \
+#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
+#define PageDirty(page) test_bit(PG_dirty, &(page)->flags)
+#define SetPageDirty(page) set_bit(PG_dirty, &(page)->flags)
+#define ClearPageDirty(page) clear_bit(PG_dirty, &(page)->flags)
+#define PageLocked(page) test_bit(PG_locked, &(page)->flags)
+#define LockPage(page) set_bit(PG_locked, &(page)->flags)
+#define TryLockPage(page) test_and_set_bit(PG_locked, &(page)->flags)
+#define PageChecked(page) test_bit(PG_checked, &(page)->flags)
+#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
+#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)
+#define PageLaunder(page) test_bit(PG_launder, &(page)->flags)
+#define SetPageLaunder(page) set_bit(PG_launder, &(page)->flags)
+#define ClearPageLaunder(page) clear_bit(PG_launder, &(page)->flags)
+#define ClearPageArch1(page) clear_bit(PG_arch_1, &(page)->flags)
+
+#define PageError(page) test_bit(PG_error, &(page)->flags)
+#define SetPageError(page) set_bit(PG_error, &(page)->flags)
+#define ClearPageError(page) clear_bit(PG_error, &(page)->flags)
+#define PageReferenced(page) test_bit(PG_referenced, &(page)->flags)
+#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
+#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)
+
+#define PageActive(page) test_bit(PG_active, &(page)->flags)
+#define SetPageActive(page) set_bit(PG_active, &(page)->flags)
+#define ClearPageActive(page) clear_bit(PG_active, &(page)->flags)
+
+#define PageWriteback(page) test_bit(PG_writeback, &(page)->flags)
+#define TestSetPageWriteback(page) test_and_set_bit(PG_writeback, \
&(page)->flags)
-#define TestClearPageWriteback(page) cfs_test_and_clear_bit(PG_writeback, \
+#define TestClearPageWriteback(page) test_and_clear_bit(PG_writeback, \
&(page)->flags)
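/*
 * Usage sketch (hypothetical helper, assuming cfs_page_t exposes ->flags as
 * the macros above imply): the page-flag wrappers now expand to the plain
 * bit operations, so a typical completion path reads like this.
 */
static inline void example_page_done(cfs_page_t *page)
{
	ClearPageError(page);           /* clear_bit(PG_error, ...)    */
	SetPageUptodate(page);          /* set_bit(PG_uptodate, ...)   */
	if (PageLocked(page))           /* test_bit(PG_locked, ...)    */
		UnlockPage(page);       /* unlock_page(page)           */
}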
#define __GFP_FS (1)
size_t count;
loff_t index;
u32 version;
- cfs_mutex_t lock;
+ struct mutex lock;
const struct seq_operations *op;
void *private;
};
#define CFS_WAITLINK_MAGIC 'CWLM'
typedef struct cfs_waitq {
+ unsigned int magic;
+ unsigned int flags;
- unsigned int magic;
- unsigned int flags;
-
- cfs_spinlock_t guard;
- cfs_list_t waiters;
+ spinlock_t guard;
+ cfs_list_t waiters;
} cfs_waitq_t;
#define TASKSLT_MAGIC 'TSLT' /* Task Slot */
typedef struct _TASK_MAN {
+ ULONG Magic; /* Magic and Flags */
+ ULONG Flags;
- ULONG Magic; /* Magic and Flags */
- ULONG Flags;
-
- cfs_spinlock_t Lock; /* Protection lock */
-
- cfs_mem_cache_t *slab; /* Memory slab for task slot */
+ spinlock_t Lock; /* Protection lock */
- ULONG NumOfTasks; /* Total tasks (threads) */
- LIST_ENTRY TaskList; /* List of task slots */
+ cfs_mem_cache_t *slab; /* Memory slab for task slot */
+ ULONG NumOfTasks; /* Total tasks (threads) */
+ LIST_ENTRY TaskList; /* List of task slots */
} TASK_MAN, *PTASK_MAN;
typedef struct _TASK_SLOT {
} KS_TSDU_MDL, *PKS_TSDU_MDL;
typedef struct ks_engine_mgr {
- cfs_spinlock_t lock;
- int stop;
- event_t exit;
- event_t start;
- cfs_list_t list;
+ spinlock_t lock;
+ int stop;
+ event_t exit;
+ event_t start;
+ cfs_list_t list;
} ks_engine_mgr_t;
typedef struct ks_engine_slot {
} ks_engine_slot_t;
typedef struct _KS_TSDUMGR {
- cfs_list_t TsduList;
- ULONG NumOfTsdu;
- ULONG TotalBytes;
- KEVENT Event;
- cfs_spinlock_t Lock;
- ks_engine_slot_t Slot;
- ULONG Payload;
- int Busy:1;
- int OOB:1;
+ cfs_list_t TsduList;
+ ULONG NumOfTsdu;
+ ULONG TotalBytes;
+ KEVENT Event;
+ spinlock_t Lock;
+ ks_engine_slot_t Slot;
+ ULONG Payload;
+ int Busy:1;
+ int OOB:1;
} KS_TSDUMGR, *PKS_TSDUMGR;
-#define ks_lock_tsdumgr(mgr) cfs_spin_lock(&((mgr)->Lock))
-#define ks_unlock_tsdumgr(mgr) cfs_spin_unlock(&((mgr)->Lock))
+#define ks_lock_tsdumgr(mgr) spin_lock(&((mgr)->Lock))
+#define ks_unlock_tsdumgr(mgr) spin_unlock(&((mgr)->Lock))
typedef struct _KS_CHAIN {
KS_TSDUMGR Normal; /* normal queue */
ulong kstc_magic; /* Magic & Flags */
ulong kstc_flags;
- cfs_spinlock_t kstc_lock; /* serialise lock*/
+ spinlock_t kstc_lock; /* serialise lock*/
void * kstc_conn; /* ks_conn_t */
ks_tconn_type_t kstc_type; /* tdi connection Type */
} ks_addr_slot_t;
typedef struct {
+ /*
+ * Tdi client information
+ */
- /*
- * Tdi client information
- */
+ UNICODE_STRING ksnd_client_name; /* tdi client module name */
+ HANDLE ksnd_pnp_handle; /* the handle for pnp changes */
- UNICODE_STRING ksnd_client_name; /* tdi client module name */
- HANDLE ksnd_pnp_handle; /* the handle for pnp changes */
-
- cfs_spinlock_t ksnd_addrs_lock; /* serialize ip address list access */
+ spinlock_t ksnd_addrs_lock; /* serialize ip address list */
LIST_ENTRY ksnd_addrs_list; /* list of the ip addresses */
int ksnd_naddrs; /* number of the ip addresses */
TDI_PROVIDER_INFO ksnd_provider; /* tdi tcp/ip provider's information */
- cfs_spinlock_t ksnd_tconn_lock; /* tdi connections access serialise */
+ spinlock_t ksnd_tconn_lock; /* tdi connections access lock*/
+
+ int ksnd_ntconns; /* number of tconns in list */
+ cfs_list_t ksnd_tconns; /* tdi connections list */
+ cfs_mem_cache_t *ksnd_tconn_slab; /* ks_tconn_t allocation slabs*/
+ event_t ksnd_tconn_exit; /* event signal by last tconn */
- int ksnd_ntconns; /* number of tconns attached in list */
- cfs_list_t ksnd_tconns; /* tdi connections list */
- cfs_mem_cache_t * ksnd_tconn_slab; /* slabs for ks_tconn_t allocations */
- event_t ksnd_tconn_exit; /* exit event to be signaled by the last tconn */
+ spinlock_t ksnd_tsdu_lock; /* tsdu access serialise */
- cfs_spinlock_t ksnd_tsdu_lock; /* tsdu access serialise */
-
int ksnd_ntsdus; /* number of tsdu buffers allocated */
ulong ksnd_tsdu_size; /* the size of a single tsdu buffer */
cfs_mem_cache_t *ksnd_tsdu_slab; /* slab cache for tsdu buffer allocation */
};
static struct cfs_zone_nob cfs_zone_nob;
-static spinlock_t cfs_zone_guard;
+static spinlock_t cfs_zone_guard;
cfs_mem_cache_t *mem_cache_find(const char *name, size_t objsize)
{
- cfs_mem_cache_t *walker = NULL;
+ cfs_mem_cache_t *walker = NULL;
- LASSERT(cfs_zone_nob.z_nob != NULL);
+ LASSERT(cfs_zone_nob.z_nob != NULL);
- spin_lock(&cfs_zone_guard);
- list_for_each_entry(walker, cfs_zone_nob.z_nob, mc_link) {
- if (!strcmp(walker->mc_name, name) && \
- walker->mc_size == objsize)
- break;
- }
- spin_unlock(&cfs_zone_guard);
+ spin_lock(&cfs_zone_guard);
+ list_for_each_entry(walker, cfs_zone_nob.z_nob, mc_link) {
+ if (!strcmp(walker->mc_name, name) && \
+ walker->mc_size == objsize)
+ break;
+ }
+ spin_unlock(&cfs_zone_guard);
- return walker;
+ return walker;
}
/*
void raw_page_death_row_clean(void)
{
- struct xnu_raw_page *pg;
+ struct xnu_raw_page *pg;
- spin_lock(&page_death_row_phylax);
- while (!list_empty(&page_death_row)) {
- pg = container_of(page_death_row.next,
- struct xnu_raw_page, link);
- list_del(&pg->link);
- spin_unlock(&page_death_row_phylax);
- raw_page_finish(pg);
- spin_lock(&page_death_row_phylax);
- }
- spin_unlock(&page_death_row_phylax);
+ spin_lock(&page_death_row_phylax);
+ while (!list_empty(&page_death_row)) {
+ pg = container_of(page_death_row.next,
+ struct xnu_raw_page, link);
+ list_del(&pg->link);
+ spin_unlock(&page_death_row_phylax);
+ raw_page_finish(pg);
+ spin_lock(&page_death_row_phylax);
+ }
+ spin_unlock(&page_death_row_phylax);
}
/* Free a "page" */
{
if (!atomic_dec_and_test(&pg->count))
return;
- /*
- * kmem_free()->vm_map_remove()->vm_map_delete()->lock_write() may
- * block. (raw_page_done()->upl_abort() can block too) On the other
- * hand, cfs_free_page() may be called in non-blockable context. To
- * work around this, park pages on global list when cannot block.
- */
- if (get_preemption_level() > 0) {
- spin_lock(&page_death_row_phylax);
- list_add(&pg->link, &page_death_row);
- spin_unlock(&page_death_row_phylax);
- } else {
- raw_page_finish(pg);
- raw_page_death_row_clean();
- }
+ /*
+ * kmem_free()->vm_map_remove()->vm_map_delete()->lock_write() may
+ * block. (raw_page_done()->upl_abort() can block too) On the other
+ * hand, cfs_free_page() may be called in non-blockable context. To
+ * work around this, park pages on global list when cannot block.
+ */
+ if (get_preemption_level() > 0) {
+ spin_lock(&page_death_row_phylax);
+ list_add(&pg->link, &page_death_row);
+ spin_unlock(&page_death_row_phylax);
+ } else {
+ raw_page_finish(pg);
+ raw_page_death_row_clean();
+ }
}
cfs_page_t *cfs_alloc_page(u_int32_t flags)
cfs_zone_nob.z_nob = nob->z_nob;
}
- spin_lock_init(&cfs_zone_guard);
+ spin_lock_init(&cfs_zone_guard);
#endif
- CFS_INIT_LIST_HEAD(&page_death_row);
- spin_lock_init(&page_death_row_phylax);
- raw_page_cache = cfs_mem_cache_create("raw-page", CFS_PAGE_SIZE, 0, 0);
- return 0;
+ CFS_INIT_LIST_HEAD(&page_death_row);
+ spin_lock_init(&page_death_row_phylax);
+ raw_page_cache = cfs_mem_cache_create("raw-page", CFS_PAGE_SIZE, 0, 0);
+ return 0;
}
void cfs_mem_fini(void)
{
- raw_page_death_row_clean();
- spin_lock_done(&page_death_row_phylax);
- cfs_mem_cache_destroy(raw_page_cache);
+ raw_page_death_row_clean();
+ spin_lock_done(&page_death_row_phylax);
+ cfs_mem_cache_destroy(raw_page_cache);
-#if CFS_INDIVIDUAL_ZONE
- cfs_zone_nob.z_nob = NULL;
- spin_lock_done(&cfs_zone_guard);
+#if CFS_INDIVIDUAL_ZONE
+ cfs_zone_nob.z_nob = NULL;
+ spin_lock_done(&cfs_zone_guard);
#endif
}
int count = cfs_atomic_inc_return(&cfs_fail_count);
if (count >= cfs_fail_val) {
- cfs_set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
+ set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
cfs_atomic_set(&cfs_fail_count, 0);
/* we lost the race to increase */
if (count > cfs_fail_val)
if ((set == CFS_FAIL_LOC_ORSET || set == CFS_FAIL_LOC_RESET) &&
(value & CFS_FAIL_ONCE))
- cfs_set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
+ set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
/* Lost race to set CFS_FAILED_BIT. */
- if (cfs_test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) {
+ if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) {
/* If CFS_FAIL_ONCE is valid, only one process can fail,
* otherwise multi-process can fail at the same time. */
if (cfs_fail_loc & CFS_FAIL_ONCE)
static inline void
cfs_hash_spin_lock(cfs_hash_lock_t *lock, int exclusive)
{
- cfs_spin_lock(&lock->spin);
+ spin_lock(&lock->spin);
}
static inline void
cfs_hash_spin_unlock(cfs_hash_lock_t *lock, int exclusive)
{
- cfs_spin_unlock(&lock->spin);
+ spin_unlock(&lock->spin);
}
static inline void
cfs_hash_rw_lock(cfs_hash_lock_t *lock, int exclusive)
{
- if (!exclusive)
- cfs_read_lock(&lock->rw);
- else
- cfs_write_lock(&lock->rw);
+ if (!exclusive)
+ read_lock(&lock->rw);
+ else
+ write_lock(&lock->rw);
}
static inline void
cfs_hash_rw_unlock(cfs_hash_lock_t *lock, int exclusive)
{
- if (!exclusive)
- cfs_read_unlock(&lock->rw);
- else
- cfs_write_unlock(&lock->rw);
+ if (!exclusive)
+ read_unlock(&lock->rw);
+ else
+ write_unlock(&lock->rw);
}
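/*
 * Usage sketch (hypothetical caller, assuming cfs_hash_lock_t exposes the
 * .rw member used above): the exclusive flag selects the write side.
 */
static void example_hash_table_scan(cfs_hash_lock_t *lock)
{
	cfs_hash_rw_lock(lock, 0);      /* shared: read_lock(&lock->rw)     */
	/* ... lookup ... */
	cfs_hash_rw_unlock(lock, 0);

	cfs_hash_rw_lock(lock, 1);      /* exclusive: write_lock(&lock->rw) */
	/* ... rehash / resize ... */
	cfs_hash_rw_unlock(lock, 1);
}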
/** No lock hash */
static void
cfs_hash_lock_setup(cfs_hash_t *hs)
{
- if (cfs_hash_with_no_lock(hs)) {
- hs->hs_lops = &cfs_hash_nl_lops;
+ if (cfs_hash_with_no_lock(hs)) {
+ hs->hs_lops = &cfs_hash_nl_lops;
- } else if (cfs_hash_with_no_bktlock(hs)) {
- hs->hs_lops = &cfs_hash_nbl_lops;
- cfs_spin_lock_init(&hs->hs_lock.spin);
+ } else if (cfs_hash_with_no_bktlock(hs)) {
+ hs->hs_lops = &cfs_hash_nbl_lops;
+ spin_lock_init(&hs->hs_lock.spin);
- } else if (cfs_hash_with_rehash(hs)) {
- cfs_rwlock_init(&hs->hs_lock.rw);
+ } else if (cfs_hash_with_rehash(hs)) {
+ rwlock_init(&hs->hs_lock.rw);
if (cfs_hash_with_rw_bktlock(hs))
hs->hs_lops = &cfs_hash_bkt_rw_lops;
max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
return;
- cfs_spin_lock(&hs->hs_dep_lock);
- hs->hs_dep_max = dep_cur;
- hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
- hs->hs_dep_off = bd->bd_offset;
- hs->hs_dep_bits = hs->hs_cur_bits;
- cfs_spin_unlock(&hs->hs_dep_lock);
+ spin_lock(&hs->hs_dep_lock);
+ hs->hs_dep_max = dep_cur;
+ hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
+ hs->hs_dep_off = bd->bd_offset;
+ hs->hs_dep_bits = hs->hs_cur_bits;
+ spin_unlock(&hs->hs_dep_lock);
cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
# endif
cfs_hash_with_no_bktlock(hs))
continue;
- if (cfs_hash_with_rw_bktlock(hs))
- cfs_rwlock_init(&new_bkts[i]->hsb_lock.rw);
- else if (cfs_hash_with_spin_bktlock(hs))
- cfs_spin_lock_init(&new_bkts[i]->hsb_lock.spin);
- else
- LBUG(); /* invalid use-case */
- }
- return new_bkts;
+ if (cfs_hash_with_rw_bktlock(hs))
+ rwlock_init(&new_bkts[i]->hsb_lock.rw);
+ else if (cfs_hash_with_spin_bktlock(hs))
+ spin_lock_init(&new_bkts[i]->hsb_lock.spin);
+ else
+ LBUG(); /* invalid use-case */
+ }
+ return new_bkts;
}
/**
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static int cfs_hash_dep_print(cfs_workitem_t *wi)
{
- cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
- int dep;
- int bkt;
- int off;
- int bits;
-
- cfs_spin_lock(&hs->hs_dep_lock);
- dep = hs->hs_dep_max;
- bkt = hs->hs_dep_bkt;
- off = hs->hs_dep_off;
- bits = hs->hs_dep_bits;
- cfs_spin_unlock(&hs->hs_dep_lock);
-
- LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
- hs->hs_name, bits, dep, bkt, off);
- cfs_spin_lock(&hs->hs_dep_lock);
- hs->hs_dep_bits = 0; /* mark as workitem done */
- cfs_spin_unlock(&hs->hs_dep_lock);
- return 0;
+ cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
+ int dep;
+ int bkt;
+ int off;
+ int bits;
+
+ spin_lock(&hs->hs_dep_lock);
+ dep = hs->hs_dep_max;
+ bkt = hs->hs_dep_bkt;
+ off = hs->hs_dep_off;
+ bits = hs->hs_dep_bits;
+ spin_unlock(&hs->hs_dep_lock);
+
+ LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
+ hs->hs_name, bits, dep, bkt, off);
+ spin_lock(&hs->hs_dep_lock);
+ hs->hs_dep_bits = 0; /* mark as workitem done */
+ spin_unlock(&hs->hs_dep_lock);
+ return 0;
}
static void cfs_hash_depth_wi_init(cfs_hash_t *hs)
{
- cfs_spin_lock_init(&hs->hs_dep_lock);
+ spin_lock_init(&hs->hs_dep_lock);
cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
}
static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
{
if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
- return;
+ return;
- cfs_spin_lock(&hs->hs_dep_lock);
- while (hs->hs_dep_bits != 0) {
- cfs_spin_unlock(&hs->hs_dep_lock);
- cfs_cond_resched();
- cfs_spin_lock(&hs->hs_dep_lock);
- }
- cfs_spin_unlock(&hs->hs_dep_lock);
+ spin_lock(&hs->hs_dep_lock);
+ while (hs->hs_dep_bits != 0) {
+ spin_unlock(&hs->hs_dep_lock);
+ cfs_cond_resched();
+ spin_lock(&hs->hs_dep_lock);
+ }
+ spin_unlock(&hs->hs_dep_lock);
}
#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
if (maxdep < bd.bd_bucket->hsb_depmax) {
maxdep = bd.bd_bucket->hsb_depmax;
#ifdef __KERNEL__
- maxdepb = cfs_ffz(~maxdep);
+ maxdepb = ffz(~maxdep);
#endif
}
total += bd.bd_bucket->hsb_count;
};
static cfs_list_t kkuc_groups[KUC_GRP_MAX+1] = {};
/* Protect message sending against remove and adds */
-static CFS_DECLARE_RWSEM(kg_sem);
+static DECLARE_RWSEM(kg_sem);
/** Add a receiver to a broadcast group
* @param filp pipe to write into
reg->kr_uid = uid;
reg->kr_data = data;
- cfs_down_write(&kg_sem);
+ down_write(&kg_sem);
if (kkuc_groups[group].next == NULL)
CFS_INIT_LIST_HEAD(&kkuc_groups[group]);
cfs_list_add(&reg->kr_chain, &kkuc_groups[group]);
- cfs_up_write(&kg_sem);
+ up_write(&kg_sem);
CDEBUG(D_KUC, "Added uid=%d fp=%p to group %d\n", uid, filp, group);
libcfs_kkuc_group_put(group, &lh);
}
- cfs_down_write(&kg_sem);
+ down_write(&kg_sem);
cfs_list_for_each_entry_safe(reg, next, &kkuc_groups[group], kr_chain) {
if ((uid == 0) || (uid == reg->kr_uid)) {
cfs_list_del(&reg->kr_chain);
cfs_free(reg);
}
}
- cfs_up_write(&kg_sem);
+ up_write(&kg_sem);
RETURN(0);
}
int rc = 0;
ENTRY;
- cfs_down_read(&kg_sem);
+ down_read(&kg_sem);
cfs_list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
if (reg->kr_fp != NULL) {
rc = libcfs_kkuc_msg_put(reg->kr_fp, payload);
}
}
}
- cfs_up_read(&kg_sem);
+ up_read(&kg_sem);
RETURN(rc);
}
if (kkuc_groups[group].next == NULL)
RETURN(0);
- cfs_down_read(&kg_sem);
+ down_read(&kg_sem);
cfs_list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
if (reg->kr_fp != NULL) {
rc = cb_func(reg->kr_data, cb_arg);
}
}
- cfs_up_read(&kg_sem);
+ up_read(&kg_sem);
RETURN(rc);
}
cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
{
struct cfs_percpt_lock *pcl;
- cfs_spinlock_t *lock;
+ spinlock_t *lock;
int i;
/* NB: cptab can be NULL, pcl will be for HW CPUs on that case */
}
cfs_percpt_for_each(lock, i, pcl->pcl_locks)
- cfs_spin_lock_init(lock);
+ spin_lock_init(lock);
return pcl;
}
}
if (likely(index != CFS_PERCPT_LOCK_EX)) {
- cfs_spin_lock(pcl->pcl_locks[index]);
+ spin_lock(pcl->pcl_locks[index]);
return;
}
/* exclusive lock request */
for (i = 0; i < ncpt; i++) {
- cfs_spin_lock(pcl->pcl_locks[i]);
+ spin_lock(pcl->pcl_locks[i]);
if (i == 0) {
LASSERT(!pcl->pcl_locked);
/* nobody should take private lock after this
index = ncpt == 1 ? 0 : index;
if (likely(index != CFS_PERCPT_LOCK_EX)) {
- cfs_spin_unlock(pcl->pcl_locks[index]);
+ spin_unlock(pcl->pcl_locks[index]);
return;
}
LASSERT(pcl->pcl_locked);
pcl->pcl_locked = 0;
}
- cfs_spin_unlock(pcl->pcl_locks[i]);
+ spin_unlock(pcl->pcl_locks[i]);
}
}
CFS_EXPORT_SYMBOL(cfs_percpt_unlock);
int oom_get_adj(struct task_struct *task, int scope)
{
-
- int oom_adj;
+ int oom_adj;
#ifdef HAVE_OOMADJ_IN_SIG
- unsigned long flags;
+ unsigned long flags;
- spin_lock_irqsave(&task->sighand->siglock, flags);
- oom_adj = task->signal->oom_adj;
- task->signal->oom_adj = scope;
- spin_unlock_irqrestore(&task->sighand->siglock, flags);
+ spin_lock_irqsave(&task->sighand->siglock, flags);
+ oom_adj = task->signal->oom_adj;
+ task->signal->oom_adj = scope;
+ spin_unlock_irqrestore(&task->sighand->siglock, flags);
#else
- oom_adj = task->oomkilladj;
- task->oomkilladj = scope;
+ oom_adj = task->oomkilladj;
+ task->oomkilladj = scope;
#endif
- return oom_adj;
+ return oom_adj;
}
int cfs_create_thread(int (*fn)(void *),
void
cfs_waitq_add_exclusive_head(cfs_waitq_t *waitq, cfs_waitlink_t *link)
{
- unsigned long flags;
+ unsigned long flags;
- spin_lock_irqsave(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
- __add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
- spin_unlock_irqrestore(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
+ spin_lock_irqsave(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
+ __add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
+ spin_unlock_irqrestore(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
}
EXPORT_SYMBOL(cfs_waitq_add_exclusive_head);
char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
-cfs_rw_semaphore_t cfs_tracefile_sem;
+struct rw_semaphore cfs_tracefile_sem;
int cfs_tracefile_init_arch()
{
int j;
struct cfs_trace_cpu_data *tcd;
- cfs_init_rwsem(&cfs_tracefile_sem);
+ init_rwsem(&cfs_tracefile_sem);
/* initialize trace_data */
memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
/* arch related info initialized */
cfs_tcd_for_each(tcd, i, j) {
- cfs_spin_lock_init(&tcd->tcd_lock);
+ spin_lock_init(&tcd->tcd_lock);
tcd->tcd_pages_factor = pages_factor[i];
tcd->tcd_type = i;
tcd->tcd_cpu = j;
cfs_trace_data[i] = NULL;
}
- cfs_fini_rwsem(&cfs_tracefile_sem);
+ fini_rwsem(&cfs_tracefile_sem);
}
void cfs_tracefile_read_lock()
{
- cfs_down_read(&cfs_tracefile_sem);
+ down_read(&cfs_tracefile_sem);
}
void cfs_tracefile_read_unlock()
{
- cfs_up_read(&cfs_tracefile_sem);
+ up_read(&cfs_tracefile_sem);
}
void cfs_tracefile_write_lock()
{
- cfs_down_write(&cfs_tracefile_sem);
+ down_write(&cfs_tracefile_sem);
}
void cfs_tracefile_write_unlock()
{
- cfs_up_write(&cfs_tracefile_sem);
+ up_write(&cfs_tracefile_sem);
}
cfs_trace_buf_type_t cfs_trace_buf_idx_get()
int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
{
__LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
- if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
- cfs_spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
- else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
- cfs_spin_lock_bh(&tcd->tcd_lock);
- else if (unlikely(walking))
- cfs_spin_lock_irq(&tcd->tcd_lock);
- else
- cfs_spin_lock(&tcd->tcd_lock);
+ if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
+ spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
+ else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
+ spin_lock_bh(&tcd->tcd_lock);
+ else if (unlikely(walking))
+ spin_lock_irq(&tcd->tcd_lock);
+ else
+ spin_lock(&tcd->tcd_lock);
return 1;
}
void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
{
__LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
- if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
- cfs_spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
- else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
- cfs_spin_unlock_bh(&tcd->tcd_lock);
- else if (unlikely(walking))
- cfs_spin_unlock_irq(&tcd->tcd_lock);
- else
- cfs_spin_unlock(&tcd->tcd_lock);
+ if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
+ spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
+ else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
+ spin_unlock_bh(&tcd->tcd_lock);
+ else if (unlikely(walking))
+ spin_unlock_irq(&tcd->tcd_lock);
+ else
+ spin_unlock(&tcd->tcd_lock);
}
int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
RETURN(0);
}
-static cfs_rw_semaphore_t ioctl_list_sem;
+static struct rw_semaphore ioctl_list_sem;
static cfs_list_t ioctl_list;
int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand)
{
int rc = 0;
- cfs_down_write(&ioctl_list_sem);
+ down_write(&ioctl_list_sem);
if (!cfs_list_empty(&hand->item))
rc = -EBUSY;
else
cfs_list_add_tail(&hand->item, &ioctl_list);
- cfs_up_write(&ioctl_list_sem);
+ up_write(&ioctl_list_sem);
return rc;
}
{
int rc = 0;
- cfs_down_write(&ioctl_list_sem);
+ down_write(&ioctl_list_sem);
if (cfs_list_empty(&hand->item))
rc = -ENOENT;
else
cfs_list_del_init(&hand->item);
- cfs_up_write(&ioctl_list_sem);
+ up_write(&ioctl_list_sem);
return rc;
}
default: {
struct libcfs_ioctl_handler *hand;
err = -EINVAL;
- cfs_down_read(&ioctl_list_sem);
+ down_read(&ioctl_list_sem);
cfs_list_for_each_entry_typed(hand, &ioctl_list,
struct libcfs_ioctl_handler, item) {
err = hand->handle_ioctl(cmd, data);
break;
}
}
- cfs_up_read(&ioctl_list_sem);
+ up_read(&ioctl_list_sem);
break;
}
}
MODULE_LICENSE("GPL");
extern cfs_psdev_t libcfs_dev;
-extern cfs_rw_semaphore_t cfs_tracefile_sem;
-extern cfs_mutex_t cfs_trace_thread_mutex;
+extern struct rw_semaphore cfs_tracefile_sem;
+extern struct mutex cfs_trace_thread_mutex;
extern struct cfs_wi_sched *cfs_sched_rehash;
extern void libcfs_init_nidstrings(void);
libcfs_arch_init();
libcfs_init_nidstrings();
- cfs_init_rwsem(&cfs_tracefile_sem);
- cfs_mutex_init(&cfs_trace_thread_mutex);
- cfs_init_rwsem(&ioctl_list_sem);
+ init_rwsem(&cfs_tracefile_sem);
+ mutex_init(&cfs_trace_thread_mutex);
+ init_rwsem(&ioctl_list_sem);
CFS_INIT_LIST_HEAD(&ioctl_list);
cfs_waitq_init(&cfs_race_waitq);
printk(CFS_KERN_ERR "LustreError: libcfs_debug_cleanup: %d\n",
rc);
- cfs_fini_rwsem(&ioctl_list_sem);
- cfs_fini_rwsem(&cfs_tracefile_sem);
+ fini_rwsem(&ioctl_list_sem);
+ fini_rwsem(&cfs_tracefile_sem);
libcfs_arch_cleanup();
}
static int libcfs_nidstring_idx = 0;
#ifdef __KERNEL__
-static cfs_spinlock_t libcfs_nidstring_lock;
+static spinlock_t libcfs_nidstring_lock;
void libcfs_init_nidstrings (void)
{
- cfs_spin_lock_init(&libcfs_nidstring_lock);
+ spin_lock_init(&libcfs_nidstring_lock);
}
-# define NIDSTR_LOCK(f) cfs_spin_lock_irqsave(&libcfs_nidstring_lock, f)
-# define NIDSTR_UNLOCK(f) cfs_spin_unlock_irqrestore(&libcfs_nidstring_lock, f)
+# define NIDSTR_LOCK(f) spin_lock_irqsave(&libcfs_nidstring_lock, f)
+# define NIDSTR_UNLOCK(f) spin_unlock_irqrestore(&libcfs_nidstring_lock, f)
#else
# define NIDSTR_LOCK(f) (f=sizeof(f)) /* avoid set-but-unused warnings */
# define NIDSTR_UNLOCK(f) (f=sizeof(f))
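/*
 * Usage sketch (hypothetical caller): the kernel variant really disables
 * interrupts around the shared nid-string buffers, while the userspace
 * variant only exists to silence set-but-unused warnings.
 */
static void example_nidstr_locking(void)
{
	unsigned long flags;

	NIDSTR_LOCK(flags);
	/* ... hand out one of the static nid-string buffers ... */
	NIDSTR_UNLOCK(flags);
}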
char cfs_tracefile[TRACEFILE_NAME_SIZE];
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
static struct tracefiled_ctl trace_tctl;
-cfs_mutex_t cfs_trace_thread_mutex;
+struct mutex cfs_trace_thread_mutex;
static int thread_running = 0;
cfs_atomic_t cfs_tage_allocated = CFS_ATOMIC_INIT(0);
pgcount + 1, tcd->tcd_cur_pages);
CFS_INIT_LIST_HEAD(&pc.pc_pages);
- cfs_spin_lock_init(&pc.pc_lock);
+ spin_lock_init(&pc.pc_lock);
cfs_list_for_each_entry_safe_typed(tage, tmp, &tcd->tcd_pages,
struct cfs_trace_page, linkage) {
static void collect_pages_on_all_cpus(struct page_collection *pc)
{
- struct cfs_trace_cpu_data *tcd;
- int i, cpu;
+ struct cfs_trace_cpu_data *tcd;
+ int i, cpu;
- cfs_spin_lock(&pc->pc_lock);
+ spin_lock(&pc->pc_lock);
cfs_for_each_possible_cpu(cpu) {
cfs_tcd_for_each_type_lock(tcd, i, cpu) {
cfs_list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
}
}
}
- cfs_spin_unlock(&pc->pc_lock);
+ spin_unlock(&pc->pc_lock);
}
static void collect_pages(struct page_collection *pc)
struct cfs_trace_page *tmp;
int i, cpu;
- cfs_spin_lock(&pc->pc_lock);
+ spin_lock(&pc->pc_lock);
cfs_for_each_possible_cpu(cpu) {
cfs_tcd_for_each_type_lock(tcd, i, cpu) {
cur_head = tcd->tcd_pages.next;
}
}
}
- cfs_spin_unlock(&pc->pc_lock);
+ spin_unlock(&pc->pc_lock);
}
static void put_pages_back(struct page_collection *pc)
* if we have been steadily writing (and otherwise discarding) pages via the
* debug daemon. */
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
- struct cfs_trace_cpu_data *tcd)
+ struct cfs_trace_cpu_data *tcd)
{
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
- cfs_spin_lock(&pc->pc_lock);
+ spin_lock(&pc->pc_lock);
cfs_list_for_each_entry_safe_typed(tage, tmp, &pc->pc_pages,
struct cfs_trace_page, linkage) {
tcd->tcd_cur_daemon_pages--;
}
}
- cfs_spin_unlock(&pc->pc_lock);
+ spin_unlock(&pc->pc_lock);
}
static void put_pages_on_daemon_list(struct page_collection *pc)
void cfs_trace_debug_print(void)
{
- struct page_collection pc;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
+ struct page_collection pc;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
- cfs_spin_lock_init(&pc.pc_lock);
+ spin_lock_init(&pc.pc_lock);
pc.pc_want_daemon_pages = 1;
collect_pages(&pc);
goto out;
}
- cfs_spin_lock_init(&pc.pc_lock);
+ spin_lock_init(&pc.pc_lock);
pc.pc_want_daemon_pages = 1;
collect_pages(&pc);
if (cfs_list_empty(&pc.pc_pages)) {
void cfs_trace_flush_pages(void)
{
- struct page_collection pc;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
+ struct page_collection pc;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
- cfs_spin_lock_init(&pc.pc_lock);
+ spin_lock_init(&pc.pc_lock);
pc.pc_want_daemon_pages = 1;
collect_pages(&pc);
/* this is so broken in uml? what on earth is going on? */
cfs_daemonize("ktracefiled");
- cfs_spin_lock_init(&pc.pc_lock);
- cfs_complete(&tctl->tctl_start);
+ spin_lock_init(&pc.pc_lock);
+ complete(&tctl->tctl_start);
while (1) {
cfs_waitlink_t __wait;
cfs_time_seconds(1));
cfs_waitq_del(&tctl->tctl_waitq, &__wait);
}
- cfs_complete(&tctl->tctl_stop);
+ complete(&tctl->tctl_stop);
return 0;
}
struct tracefiled_ctl *tctl = &trace_tctl;
int rc = 0;
- cfs_mutex_lock(&cfs_trace_thread_mutex);
+ mutex_lock(&cfs_trace_thread_mutex);
if (thread_running)
goto out;
- cfs_init_completion(&tctl->tctl_start);
- cfs_init_completion(&tctl->tctl_stop);
+ init_completion(&tctl->tctl_start);
+ init_completion(&tctl->tctl_stop);
cfs_waitq_init(&tctl->tctl_waitq);
cfs_atomic_set(&tctl->tctl_shutdown, 0);
goto out;
}
- cfs_wait_for_completion(&tctl->tctl_start);
+ wait_for_completion(&tctl->tctl_start);
thread_running = 1;
out:
- cfs_mutex_unlock(&cfs_trace_thread_mutex);
+ mutex_unlock(&cfs_trace_thread_mutex);
return rc;
}
{
struct tracefiled_ctl *tctl = &trace_tctl;
- cfs_mutex_lock(&cfs_trace_thread_mutex);
+ mutex_lock(&cfs_trace_thread_mutex);
if (thread_running) {
printk(CFS_KERN_INFO
"Lustre: shutting down debug daemon thread...\n");
cfs_atomic_set(&tctl->tctl_shutdown, 1);
- cfs_wait_for_completion(&tctl->tctl_stop);
+ wait_for_completion(&tctl->tctl_stop);
thread_running = 0;
}
- cfs_mutex_unlock(&cfs_trace_thread_mutex);
+ mutex_unlock(&cfs_trace_thread_mutex);
}
int cfs_tracefile_init(int max_pages)
static void cfs_trace_cleanup(void)
{
- struct page_collection pc;
+ struct page_collection pc;
- CFS_INIT_LIST_HEAD(&pc.pc_pages);
- cfs_spin_lock_init(&pc.pc_lock);
+ CFS_INIT_LIST_HEAD(&pc.pc_pages);
+ spin_lock_init(&pc.pc_lock);
- trace_cleanup_on_all_cpus();
+ trace_cleanup_on_all_cpus();
- cfs_tracefile_fini_arch();
+ cfs_tracefile_fini_arch();
}
void cfs_tracefile_exit(void)
* and trace_put_tcd, which are called in libcfs_debug_vmsg2 and
* tcd_for_each_type_lock
*/
- cfs_spinlock_t tcd_lock;
+ spinlock_t tcd_lock;
unsigned long tcd_lock_flags;
/*
/* XXX nikita: this declaration is internal to tracefile.c and should probably
* be moved there */
struct page_collection {
- cfs_list_t pc_pages;
+ cfs_list_t pc_pages;
/*
* spin-lock protecting ->pc_pages. It is taken by smp_call_function()
* call-back functions. XXX nikita: Which is horrible: all processors
* lock. Probably ->pc_pages should be replaced with an array of
* NR_CPUS elements accessed locklessly.
*/
- cfs_spinlock_t pc_lock;
+ spinlock_t pc_lock;
/*
* if this flag is set, collect_pages() will spill both
* ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
* only ->tcd_pages are spilled.
*/
- int pc_want_daemon_pages;
+ int pc_want_daemon_pages;
};
/* XXX nikita: this declaration is internal to tracefile.c and should probably
* be moved there */
struct tracefiled_ctl {
- cfs_completion_t tctl_start;
- cfs_completion_t tctl_stop;
- cfs_waitq_t tctl_waitq;
- pid_t tctl_pid;
- cfs_atomic_t tctl_shutdown;
+ struct completion tctl_start;
+ struct completion tctl_stop;
+ cfs_waitq_t tctl_waitq;
+ pid_t tctl_pid;
+ cfs_atomic_t tctl_shutdown;
};
/*
head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
find_again:
found = 0;
- cfs_spin_lock(&cache->uc_lock);
+ spin_lock(&cache->uc_lock);
cfs_list_for_each_entry_safe(entry, next, head, ue_hash) {
/* check invalid & expired items */
if (check_unlink_entry(cache, entry))
if (!found) {
if (!new) {
- cfs_spin_unlock(&cache->uc_lock);
+ spin_unlock(&cache->uc_lock);
new = alloc_entry(cache, key, args);
if (!new) {
CERROR("fail to alloc entry\n");
if (UC_CACHE_IS_NEW(entry)) {
UC_CACHE_SET_ACQUIRING(entry);
UC_CACHE_CLEAR_NEW(entry);
- cfs_spin_unlock(&cache->uc_lock);
- rc = refresh_entry(cache, entry);
- cfs_spin_lock(&cache->uc_lock);
+ spin_unlock(&cache->uc_lock);
+ rc = refresh_entry(cache, entry);
+ spin_lock(&cache->uc_lock);
entry->ue_acquire_expire =
cfs_time_shift(cache->uc_acquire_expire);
if (rc < 0) {
cfs_waitlink_init(&wait);
cfs_waitq_add(&entry->ue_waitq, &wait);
cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_spin_unlock(&cache->uc_lock);
+ spin_unlock(&cache->uc_lock);
- left = cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
- expiry);
+ left = cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
+ expiry);
- cfs_spin_lock(&cache->uc_lock);
+ spin_lock(&cache->uc_lock);
cfs_waitq_del(&entry->ue_waitq, &wait);
if (UC_CACHE_IS_ACQUIRING(entry)) {
/* we're interrupted or upcall failed in the middle */
* without any error, should at least give a
* chance to use it once.
*/
- if (entry != new) {
- put_entry(cache, entry);
- cfs_spin_unlock(&cache->uc_lock);
- new = NULL;
- goto find_again;
- }
- }
+ if (entry != new) {
+ put_entry(cache, entry);
+ spin_unlock(&cache->uc_lock);
+ new = NULL;
+ goto find_again;
+ }
+ }
/* Now we know it's good */
out:
- cfs_spin_unlock(&cache->uc_lock);
- RETURN(entry);
+ spin_unlock(&cache->uc_lock);
+ RETURN(entry);
}
EXPORT_SYMBOL(upcall_cache_get_entry);
void upcall_cache_put_entry(struct upcall_cache *cache,
struct upcall_cache_entry *entry)
{
- ENTRY;
-
- if (!entry) {
- EXIT;
- return;
- }
-
- LASSERT(cfs_atomic_read(&entry->ue_refcount) > 0);
- cfs_spin_lock(&cache->uc_lock);
- put_entry(cache, entry);
- cfs_spin_unlock(&cache->uc_lock);
- EXIT;
+ ENTRY;
+
+ if (!entry) {
+ EXIT;
+ return;
+ }
+
+ LASSERT(cfs_atomic_read(&entry->ue_refcount) > 0);
+ spin_lock(&cache->uc_lock);
+ put_entry(cache, entry);
+ spin_unlock(&cache->uc_lock);
+ EXIT;
}
EXPORT_SYMBOL(upcall_cache_put_entry);
head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
- cfs_spin_lock(&cache->uc_lock);
+ spin_lock(&cache->uc_lock);
cfs_list_for_each_entry(entry, head, ue_hash) {
if (downcall_compare(cache, entry, key, args) == 0) {
found = 1;
CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n",
cache->uc_name, key);
/* haven't found, it's possible */
- cfs_spin_unlock(&cache->uc_lock);
+ spin_unlock(&cache->uc_lock);
RETURN(-EINVAL);
}
GOTO(out, rc = -EINVAL);
}
- cfs_spin_unlock(&cache->uc_lock);
- if (cache->uc_ops->parse_downcall)
- rc = cache->uc_ops->parse_downcall(cache, entry, args);
- cfs_spin_lock(&cache->uc_lock);
+ spin_unlock(&cache->uc_lock);
+ if (cache->uc_ops->parse_downcall)
+ rc = cache->uc_ops->parse_downcall(cache, entry, args);
+ spin_lock(&cache->uc_lock);
if (rc)
GOTO(out, rc);
cfs_list_del_init(&entry->ue_hash);
}
UC_CACHE_CLEAR_ACQUIRING(entry);
- cfs_spin_unlock(&cache->uc_lock);
- cfs_waitq_broadcast(&entry->ue_waitq);
- put_entry(cache, entry);
+ spin_unlock(&cache->uc_lock);
+ cfs_waitq_broadcast(&entry->ue_waitq);
+ put_entry(cache, entry);
- RETURN(rc);
+ RETURN(rc);
}
EXPORT_SYMBOL(upcall_cache_downcall);
static void cache_flush(struct upcall_cache *cache, int force)
{
- struct upcall_cache_entry *entry, *next;
- int i;
- ENTRY;
+ struct upcall_cache_entry *entry, *next;
+ int i;
+ ENTRY;
- cfs_spin_lock(&cache->uc_lock);
+ spin_lock(&cache->uc_lock);
for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
cfs_list_for_each_entry_safe(entry, next,
&cache->uc_hashtable[i], ue_hash) {
free_entry(cache, entry);
}
}
- cfs_spin_unlock(&cache->uc_lock);
- EXIT;
+ spin_unlock(&cache->uc_lock);
+ EXIT;
}
void upcall_cache_flush_idle(struct upcall_cache *cache)
head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
- cfs_spin_lock(&cache->uc_lock);
+ spin_lock(&cache->uc_lock);
cfs_list_for_each_entry(entry, head, ue_hash) {
if (upcall_compare(cache, entry, key, args) == 0) {
found = 1;
if (!cfs_atomic_read(&entry->ue_refcount))
free_entry(cache, entry);
}
- cfs_spin_unlock(&cache->uc_lock);
+ spin_unlock(&cache->uc_lock);
}
EXPORT_SYMBOL(upcall_cache_flush_one);
if (!cache)
RETURN(ERR_PTR(-ENOMEM));
- cfs_spin_lock_init(&cache->uc_lock);
- cfs_rwlock_init(&cache->uc_upcall_rwlock);
+ spin_lock_init(&cache->uc_lock);
+ rwlock_init(&cache->uc_upcall_rwlock);
for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
CFS_INIT_LIST_HEAD(&cache->uc_hashtable[i]);
strncpy(cache->uc_name, name, sizeof(cache->uc_name) - 1);
#define OFF_BY_START(start) ((start)/BITS_PER_LONG)
-unsigned long cfs_find_next_bit(unsigned long *addr,
+unsigned long find_next_bit(unsigned long *addr,
unsigned long size, unsigned long offset)
{
unsigned long *word, *last;
return base + bit;
}
-unsigned long cfs_find_next_zero_bit(unsigned long *addr,
+unsigned long find_next_zero_bit(unsigned long *addr,
unsigned long size, unsigned long offset)
{
unsigned long *word, *last;
* No-op implementation.
*/
-void cfs_spin_lock_init(cfs_spinlock_t *lock)
+void spin_lock_init(spinlock_t *lock)
{
- LASSERT(lock != NULL);
- (void)lock;
+ LASSERT(lock != NULL);
+ (void)lock;
}
-void cfs_spin_lock(cfs_spinlock_t *lock)
+void spin_lock(spinlock_t *lock)
{
- (void)lock;
+ (void)lock;
}
-void cfs_spin_unlock(cfs_spinlock_t *lock)
+void spin_unlock(spinlock_t *lock)
{
- (void)lock;
+ (void)lock;
}
-int cfs_spin_trylock(cfs_spinlock_t *lock)
+int spin_trylock(spinlock_t *lock)
{
- (void)lock;
+ (void)lock;
return 1;
}
-void cfs_spin_lock_bh_init(cfs_spinlock_t *lock)
+void spin_lock_bh_init(spinlock_t *lock)
{
- LASSERT(lock != NULL);
- (void)lock;
+ LASSERT(lock != NULL);
+ (void)lock;
}
-void cfs_spin_lock_bh(cfs_spinlock_t *lock)
+void spin_lock_bh(spinlock_t *lock)
{
- LASSERT(lock != NULL);
- (void)lock;
+ LASSERT(lock != NULL);
+ (void)lock;
}
-void cfs_spin_unlock_bh(cfs_spinlock_t *lock)
+void spin_unlock_bh(spinlock_t *lock)
{
- LASSERT(lock != NULL);
- (void)lock;
+ LASSERT(lock != NULL);
+ (void)lock;
}
/*
* - __up(x)
*/
-void cfs_sema_init(cfs_semaphore_t *s, int val)
+void sema_init(struct semaphore *s, int val)
{
- LASSERT(s != NULL);
- (void)s;
- (void)val;
+ LASSERT(s != NULL);
+ (void)s;
+ (void)val;
}
-void __down(cfs_semaphore_t *s)
+void __down(struct semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
+ LASSERT(s != NULL);
+ (void)s;
}
-int __down_interruptible(cfs_semaphore_t *s)
+int __down_interruptible(struct semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
- return 0;
+ LASSERT(s != NULL);
+ (void)s;
+ return 0;
}
-void __up(cfs_semaphore_t *s)
+void __up(struct semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
+ LASSERT(s != NULL);
+ (void)s;
}
* - wait_for_completion(c)
*/
-static cfs_wait_handler_t wait_handler;
+static wait_handler_t wait_handler;
-void cfs_init_completion_module(cfs_wait_handler_t handler)
+void init_completion_module(wait_handler_t handler)
{
- wait_handler = handler;
+ wait_handler = handler;
}
-int cfs_call_wait_handler(int timeout)
+int call_wait_handler(int timeout)
{
- if (!wait_handler)
- return -ENOSYS;
- return wait_handler(timeout);
+ if (!wait_handler)
+ return -ENOSYS;
+ return wait_handler(timeout);
}
-void cfs_init_completion(cfs_completion_t *c)
+void init_completion(struct completion *c)
{
- LASSERT(c != NULL);
- c->done = 0;
- cfs_waitq_init(&c->wait);
+ LASSERT(c != NULL);
+ c->done = 0;
+ cfs_waitq_init(&c->wait);
}
-void cfs_complete(cfs_completion_t *c)
+void complete(struct completion *c)
{
- LASSERT(c != NULL);
- c->done = 1;
- cfs_waitq_signal(&c->wait);
+ LASSERT(c != NULL);
+ c->done = 1;
+ cfs_waitq_signal(&c->wait);
}
-void cfs_wait_for_completion(cfs_completion_t *c)
+void wait_for_completion(struct completion *c)
{
- LASSERT(c != NULL);
- do {
- if (cfs_call_wait_handler(1000) < 0)
- break;
- } while (c->done == 0);
+ LASSERT(c != NULL);
+ do {
+ if (call_wait_handler(1000) < 0)
+ break;
+ } while (c->done == 0);
}
-int cfs_wait_for_completion_interruptible(cfs_completion_t *c)
+int wait_for_completion_interruptible(struct completion *c)
{
- LASSERT(c != NULL);
- do {
- if (cfs_call_wait_handler(1000) < 0)
- break;
- } while (c->done == 0);
- return 0;
+ LASSERT(c != NULL);
+ do {
+ if (call_wait_handler(1000) < 0)
+ break;
+ } while (c->done == 0);
+ return 0;
}
/*
* - up_write(x)
*/
-void cfs_init_rwsem(cfs_rw_semaphore_t *s)
+void init_rwsem(struct rw_semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
+ LASSERT(s != NULL);
+ (void)s;
}
-void cfs_down_read(cfs_rw_semaphore_t *s)
+void down_read(struct rw_semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
+ LASSERT(s != NULL);
+ (void)s;
}
-int cfs_down_read_trylock(cfs_rw_semaphore_t *s)
+int down_read_trylock(struct rw_semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
+ LASSERT(s != NULL);
+ (void)s;
return 1;
}
-void cfs_down_write(cfs_rw_semaphore_t *s)
+void down_write(struct rw_semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
+ LASSERT(s != NULL);
+ (void)s;
}
-int cfs_down_write_trylock(cfs_rw_semaphore_t *s)
+int down_write_trylock(struct rw_semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
+ LASSERT(s != NULL);
+ (void)s;
return 1;
}
-void cfs_up_read(cfs_rw_semaphore_t *s)
+void up_read(struct rw_semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
+ LASSERT(s != NULL);
+ (void)s;
}
-void cfs_up_write(cfs_rw_semaphore_t *s)
+void up_write(struct rw_semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
+ LASSERT(s != NULL);
+ (void)s;
}
-void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
+void fini_rwsem(struct rw_semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
+ LASSERT(s != NULL);
+ (void)s;
}
#ifdef HAVE_LIBPTHREAD
* Multi-threaded user space completion
*/
-void cfs_mt_init_completion(cfs_mt_completion_t *c)
+void mt_init_completion(mt_completion_t *c)
{
LASSERT(c != NULL);
c->c_done = 0;
pthread_cond_init(&c->c_cond, NULL);
}
-void cfs_mt_fini_completion(cfs_mt_completion_t *c)
+void mt_fini_completion(mt_completion_t *c)
{
LASSERT(c != NULL);
pthread_mutex_destroy(&c->c_mut);
pthread_cond_destroy(&c->c_cond);
}
-void cfs_mt_complete(cfs_mt_completion_t *c)
+void mt_complete(mt_completion_t *c)
{
LASSERT(c != NULL);
pthread_mutex_lock(&c->c_mut);
pthread_mutex_unlock(&c->c_mut);
}
-void cfs_mt_wait_for_completion(cfs_mt_completion_t *c)
+void mt_wait_for_completion(mt_completion_t *c)
{
LASSERT(c != NULL);
pthread_mutex_lock(&c->c_mut);
static pthread_mutex_t atomic_guard_lock = PTHREAD_MUTEX_INITIALIZER;
-int cfs_mt_atomic_read(cfs_mt_atomic_t *a)
+int mt_atomic_read(mt_atomic_t *a)
{
int r;
return r;
}
-void cfs_mt_atomic_set(cfs_mt_atomic_t *a, int b)
+void mt_atomic_set(mt_atomic_t *a, int b)
{
pthread_mutex_lock(&atomic_guard_lock);
a->counter = b;
pthread_mutex_unlock(&atomic_guard_lock);
}
-int cfs_mt_atomic_dec_and_test(cfs_mt_atomic_t *a)
+int mt_atomic_dec_and_test(mt_atomic_t *a)
{
int r;
return (r == 0);
}
-void cfs_mt_atomic_inc(cfs_mt_atomic_t *a)
+void mt_atomic_inc(mt_atomic_t *a)
{
pthread_mutex_lock(&atomic_guard_lock);
++a->counter;
pthread_mutex_unlock(&atomic_guard_lock);
}
-void cfs_mt_atomic_dec(cfs_mt_atomic_t *a)
+void mt_atomic_dec(mt_atomic_t *a)
{
pthread_mutex_lock(&atomic_guard_lock);
--a->counter;
pthread_mutex_unlock(&atomic_guard_lock);
}
-void cfs_mt_atomic_add(int b, cfs_mt_atomic_t *a)
+void mt_atomic_add(int b, mt_atomic_t *a)
{
pthread_mutex_lock(&atomic_guard_lock);
pthread_mutex_unlock(&atomic_guard_lock);
}
-void cfs_mt_atomic_sub(int b, cfs_mt_atomic_t *a)
+void mt_atomic_sub(int b, mt_atomic_t *a)
{
pthread_mutex_lock(&atomic_guard_lock);
a->counter -= b;
(void)link;
/* well, wait for something to happen */
- cfs_call_wait_handler(0);
+ call_wait_handler(0);
}
int64_t cfs_waitq_timedwait(struct cfs_waitlink *link, cfs_task_state_t state,
{
LASSERT(link != NULL);
(void)link;
- cfs_call_wait_handler(timeout);
+ call_wait_handler(timeout);
return 0;
}
#include "tracefile.h"
struct lc_watchdog {
- cfs_spinlock_t lcw_lock; /* check or change lcw_list */
+ spinlock_t lcw_lock; /* check or change lcw_list */
int lcw_refcount; /* must hold lcw_pending_timers_lock */
cfs_timer_t lcw_timer; /* kernel timer */
cfs_list_t lcw_list; /* chain on pending list */
* and lcw_stop_completion when it exits.
* Wake lcw_event_waitq to signal timer callback dispatches.
*/
-static cfs_completion_t lcw_start_completion;
-static cfs_completion_t lcw_stop_completion;
+static struct completion lcw_start_completion;
+static struct completion lcw_stop_completion;
static cfs_waitq_t lcw_event_waitq;
/*
* When it hits 0, we stop the dispatcher.
*/
static __u32 lcw_refcount = 0;
-static CFS_DEFINE_MUTEX(lcw_refcount_mutex);
+static DEFINE_MUTEX(lcw_refcount_mutex);
/*
* List of timers that have fired that need their callbacks run by the
{
ENTRY;
#if defined(HAVE_TASKLIST_LOCK)
- cfs_read_lock(&tasklist_lock);
+ read_lock(&tasklist_lock);
#else
rcu_read_lock();
#endif
}
#if defined(HAVE_TASKLIST_LOCK)
- cfs_read_unlock(&tasklist_lock);
+ read_unlock(&tasklist_lock);
#else
rcu_read_unlock();
#endif
lcw->lcw_state = LC_WATCHDOG_EXPIRED;
- cfs_spin_lock_bh(&lcw->lcw_lock);
- LASSERT(cfs_list_empty(&lcw->lcw_list));
+ spin_lock_bh(&lcw->lcw_lock);
+ LASSERT(cfs_list_empty(&lcw->lcw_list));
- cfs_spin_lock_bh(&lcw_pending_timers_lock);
- lcw->lcw_refcount++; /* +1 for pending list */
- cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
- cfs_waitq_signal(&lcw_event_waitq);
+ spin_lock_bh(&lcw_pending_timers_lock);
+ lcw->lcw_refcount++; /* +1 for pending list */
+ cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
+ cfs_waitq_signal(&lcw_event_waitq);
- cfs_spin_unlock_bh(&lcw_pending_timers_lock);
- cfs_spin_unlock_bh(&lcw->lcw_lock);
- EXIT;
+ spin_unlock_bh(&lcw_pending_timers_lock);
+ spin_unlock_bh(&lcw->lcw_lock);
+ EXIT;
}
static int is_watchdog_fired(void)
{
- int rc;
+ int rc;
- if (cfs_test_bit(LCW_FLAG_STOP, &lcw_flags))
- return 1;
+ if (test_bit(LCW_FLAG_STOP, &lcw_flags))
+ return 1;
- cfs_spin_lock_bh(&lcw_pending_timers_lock);
- rc = !cfs_list_empty(&lcw_pending_timers);
- cfs_spin_unlock_bh(&lcw_pending_timers_lock);
- return rc;
+ spin_lock_bh(&lcw_pending_timers_lock);
+ rc = !cfs_list_empty(&lcw_pending_timers);
+ spin_unlock_bh(&lcw_pending_timers_lock);
+ return rc;
}
static void lcw_dump_stack(struct lc_watchdog *lcw)
RECALC_SIGPENDING;
SIGNAL_MASK_UNLOCK(current, flags);
- cfs_complete(&lcw_start_completion);
+ complete(&lcw_start_completion);
while (1) {
int dumplog = 1;
cfs_wait_event_interruptible(lcw_event_waitq,
is_watchdog_fired(), rc);
CDEBUG(D_INFO, "Watchdog got woken up...\n");
- if (cfs_test_bit(LCW_FLAG_STOP, &lcw_flags)) {
- CDEBUG(D_INFO, "LCW_FLAG_STOP was set, shutting down...\n");
-
- cfs_spin_lock_bh(&lcw_pending_timers_lock);
- rc = !cfs_list_empty(&lcw_pending_timers);
- cfs_spin_unlock_bh(&lcw_pending_timers_lock);
- if (rc) {
- CERROR("pending timers list was not empty at "
- "time of watchdog dispatch shutdown\n");
- }
- break;
- }
-
- cfs_spin_lock_bh(&lcw_pending_timers_lock);
+ if (test_bit(LCW_FLAG_STOP, &lcw_flags)) {
+ CDEBUG(D_INFO, "LCW_FLAG_STOP set, shutting down...\n");
+
+ spin_lock_bh(&lcw_pending_timers_lock);
+ rc = !cfs_list_empty(&lcw_pending_timers);
+ spin_unlock_bh(&lcw_pending_timers_lock);
+ if (rc) {
+ CERROR("pending timers list was not empty at "
+ "time of watchdog dispatch shutdown\n");
+ }
+ break;
+ }
+
+ spin_lock_bh(&lcw_pending_timers_lock);
while (!cfs_list_empty(&lcw_pending_timers)) {
int is_dumplog;
/* +1 ref for callback to make sure lcw wouldn't be
* deleted after releasing lcw_pending_timers_lock */
lcw->lcw_refcount++;
- cfs_spin_unlock_bh(&lcw_pending_timers_lock);
-
- /* lock ordering */
- cfs_spin_lock_bh(&lcw->lcw_lock);
- cfs_spin_lock_bh(&lcw_pending_timers_lock);
-
- if (cfs_list_empty(&lcw->lcw_list)) {
- /* already removed from pending list */
- lcw->lcw_refcount--; /* -1 ref for callback */
- if (lcw->lcw_refcount == 0)
- cfs_list_add(&lcw->lcw_list, &zombies);
- cfs_spin_unlock_bh(&lcw->lcw_lock);
+ spin_unlock_bh(&lcw_pending_timers_lock);
+
+ /* lock ordering */
+ spin_lock_bh(&lcw->lcw_lock);
+ spin_lock_bh(&lcw_pending_timers_lock);
+
+ if (cfs_list_empty(&lcw->lcw_list)) {
+ /* already removed from pending list */
+ lcw->lcw_refcount--; /* -1 ref for callback */
+ if (lcw->lcw_refcount == 0)
+ cfs_list_add(&lcw->lcw_list, &zombies);
+ spin_unlock_bh(&lcw->lcw_lock);
/* still hold lcw_pending_timers_lock */
continue;
}
cfs_list_del_init(&lcw->lcw_list);
lcw->lcw_refcount--; /* -1 ref for pending list */
- cfs_spin_unlock_bh(&lcw_pending_timers_lock);
- cfs_spin_unlock_bh(&lcw->lcw_lock);
+ spin_unlock_bh(&lcw_pending_timers_lock);
+ spin_unlock_bh(&lcw->lcw_lock);
CDEBUG(D_INFO, "found lcw for pid " LPPID "\n",
lcw->lcw_pid);
dumplog = 0;
}
- cfs_spin_lock_bh(&lcw_pending_timers_lock);
- lcw->lcw_refcount--; /* -1 ref for callback */
- if (lcw->lcw_refcount == 0)
- cfs_list_add(&lcw->lcw_list, &zombies);
- }
- cfs_spin_unlock_bh(&lcw_pending_timers_lock);
+ spin_lock_bh(&lcw_pending_timers_lock);
+ lcw->lcw_refcount--; /* -1 ref for callback */
+ if (lcw->lcw_refcount == 0)
+ cfs_list_add(&lcw->lcw_list, &zombies);
+ }
+ spin_unlock_bh(&lcw_pending_timers_lock);
while (!cfs_list_empty(&zombies)) {
lcw = cfs_list_entry(lcw_pending_timers.next,
}
}
- cfs_complete(&lcw_stop_completion);
+ complete(&lcw_stop_completion);
- RETURN(rc);
+ RETURN(rc);
}
static void lcw_dispatch_start(void)
{
- int rc;
+ int rc;
- ENTRY;
- LASSERT(lcw_refcount == 1);
+ ENTRY;
+ LASSERT(lcw_refcount == 1);
- cfs_init_completion(&lcw_stop_completion);
- cfs_init_completion(&lcw_start_completion);
+ init_completion(&lcw_stop_completion);
+ init_completion(&lcw_start_completion);
cfs_waitq_init(&lcw_event_waitq);
CDEBUG(D_INFO, "starting dispatch thread\n");
EXIT;
return;
}
- cfs_wait_for_completion(&lcw_start_completion);
- CDEBUG(D_INFO, "watchdog dispatcher initialization complete.\n");
+ wait_for_completion(&lcw_start_completion);
+ CDEBUG(D_INFO, "watchdog dispatcher initialization complete.\n");
- EXIT;
+ EXIT;
}
static void lcw_dispatch_stop(void)
{
- ENTRY;
- LASSERT(lcw_refcount == 0);
+ ENTRY;
+ LASSERT(lcw_refcount == 0);
- CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n");
+ CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n");
- cfs_set_bit(LCW_FLAG_STOP, &lcw_flags);
- cfs_waitq_signal(&lcw_event_waitq);
+ set_bit(LCW_FLAG_STOP, &lcw_flags);
+ cfs_waitq_signal(&lcw_event_waitq);
- cfs_wait_for_completion(&lcw_stop_completion);
+ wait_for_completion(&lcw_stop_completion);
- CDEBUG(D_INFO, "watchdog dispatcher has shut down.\n");
+ CDEBUG(D_INFO, "watchdog dispatcher has shut down.\n");
- EXIT;
+ EXIT;
}
struct lc_watchdog *lc_watchdog_add(int timeout,
RETURN(ERR_PTR(-ENOMEM));
}
- cfs_spin_lock_init(&lcw->lcw_lock);
+ spin_lock_init(&lcw->lcw_lock);
lcw->lcw_refcount = 1; /* refcount for owner */
lcw->lcw_task = cfs_current();
lcw->lcw_pid = cfs_curproc_pid();
CFS_INIT_LIST_HEAD(&lcw->lcw_list);
cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);
- cfs_mutex_lock(&lcw_refcount_mutex);
- if (++lcw_refcount == 1)
- lcw_dispatch_start();
- cfs_mutex_unlock(&lcw_refcount_mutex);
+ mutex_lock(&lcw_refcount_mutex);
+ if (++lcw_refcount == 1)
+ lcw_dispatch_start();
+ mutex_unlock(&lcw_refcount_mutex);
/* Keep this working in case we enable them by default */
if (lcw->lcw_state == LC_WATCHDOG_ENABLED) {
static void lc_watchdog_del_pending(struct lc_watchdog *lcw)
{
- cfs_spin_lock_bh(&lcw->lcw_lock);
- if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
- cfs_spin_lock_bh(&lcw_pending_timers_lock);
- cfs_list_del_init(&lcw->lcw_list);
- lcw->lcw_refcount--; /* -1 ref for pending list */
- cfs_spin_unlock_bh(&lcw_pending_timers_lock);
- }
-
- cfs_spin_unlock_bh(&lcw->lcw_lock);
+ spin_lock_bh(&lcw->lcw_lock);
+ if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
+ spin_lock_bh(&lcw_pending_timers_lock);
+ cfs_list_del_init(&lcw->lcw_list);
+ lcw->lcw_refcount--; /* -1 ref for pending list */
+ spin_unlock_bh(&lcw_pending_timers_lock);
+ }
+
+ spin_unlock_bh(&lcw->lcw_lock);
}
void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout)
lcw_update_time(lcw, "stopped");
- cfs_spin_lock_bh(&lcw->lcw_lock);
- cfs_spin_lock_bh(&lcw_pending_timers_lock);
- if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
- cfs_list_del_init(&lcw->lcw_list);
- lcw->lcw_refcount--; /* -1 ref for pending list */
- }
+ spin_lock_bh(&lcw->lcw_lock);
+ spin_lock_bh(&lcw_pending_timers_lock);
+ if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
+ cfs_list_del_init(&lcw->lcw_list);
+ lcw->lcw_refcount--; /* -1 ref for pending list */
+ }
- lcw->lcw_refcount--; /* -1 ref for owner */
- dead = lcw->lcw_refcount == 0;
- cfs_spin_unlock_bh(&lcw_pending_timers_lock);
- cfs_spin_unlock_bh(&lcw->lcw_lock);
+ lcw->lcw_refcount--; /* -1 ref for owner */
+ dead = lcw->lcw_refcount == 0;
+ spin_unlock_bh(&lcw_pending_timers_lock);
+ spin_unlock_bh(&lcw->lcw_lock);
- if (dead)
- LIBCFS_FREE(lcw, sizeof(*lcw));
+ if (dead)
+ LIBCFS_FREE(lcw, sizeof(*lcw));
- cfs_mutex_lock(&lcw_refcount_mutex);
- if (--lcw_refcount == 0)
- lcw_dispatch_stop();
- cfs_mutex_unlock(&lcw_refcount_mutex);
+ mutex_lock(&lcw_refcount_mutex);
+ if (--lcw_refcount == 0)
+ lcw_dispatch_stop();
+ mutex_unlock(&lcw_refcount_mutex);
- EXIT;
+ EXIT;
}
EXPORT_SYMBOL(lc_watchdog_delete);
PLIST_ENTRY ListEntry = NULL;
PTASK_SLOT TaskSlot = NULL;
- cfs_spin_lock(&(cfs_win_task_manger.Lock));
+ spin_lock(&(cfs_win_task_manger.Lock));
ListEntry = cfs_win_task_manger.TaskList.Flink;
while (ListEntry != (&(cfs_win_task_manger.TaskList))) {
ListEntry = ListEntry->Flink;
}
- cfs_spin_unlock(&(cfs_win_task_manger.Lock));
+ spin_unlock(&(cfs_win_task_manger.Lock));
}
int
cfs_win_task_manger.Magic = TASKMAN_MAGIC;
/* initialize the spinlock protection */
- cfs_spin_lock_init(&cfs_win_task_manger.Lock);
+ spin_lock_init(&cfs_win_task_manger.Lock);
/* create slab memory cache */
cfs_win_task_manger.slab = cfs_mem_cache_create(
}
/* cleanup all the taskslots attached to the list */
- cfs_spin_lock(&(cfs_win_task_manger.Lock));
+ spin_lock(&(cfs_win_task_manger.Lock));
while (!IsListEmpty(&(cfs_win_task_manger.TaskList))) {
cleanup_task_slot(TaskSlot);
}
- cfs_spin_unlock(&cfs_win_task_manger.Lock);
+ spin_unlock(&cfs_win_task_manger.Lock);
/* destroy the taskslot cache slab */
cfs_mem_cache_destroy(cfs_win_task_manger.slab);
PLIST_ENTRY ListEntry = NULL;
PTASK_SLOT TaskSlot = NULL;
- cfs_spin_lock(&(cfs_win_task_manger.Lock));
+ spin_lock(&(cfs_win_task_manger.Lock));
ListEntry = cfs_win_task_manger.TaskList.Flink;
while (ListEntry != (&(cfs_win_task_manger.TaskList))) {
errorout:
- cfs_spin_unlock(&(cfs_win_task_manger.Lock));
+ spin_unlock(&(cfs_win_task_manger.Lock));
if (!TaskSlot) {
cfs_enter_debugger();
return cfs_atomic_add_return(-i, v);
}
-int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, cfs_spinlock_t *lock)
+int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, spinlock_t *lock)
{
- if (cfs_atomic_read(v) != 1) {
- return 0;
- }
+ if (cfs_atomic_read(v) != 1)
+ return 0;
- cfs_spin_lock(lock);
+ spin_lock(lock);
if (cfs_atomic_dec_and_test(v))
return 1;
- cfs_spin_unlock(lock);
+ spin_unlock(lock);
return 0;
}
void
-cfs_rwlock_init(cfs_rwlock_t * rwlock)
+rwlock_init(rwlock_t *rwlock)
{
- cfs_spin_lock_init(&rwlock->guard);
- rwlock->count = 0;
+ spin_lock_init(&rwlock->guard);
+ rwlock->count = 0;
}
void
-cfs_rwlock_fini(cfs_rwlock_t * rwlock)
+cfs_rwlock_fini(rwlock_t *rwlock)
{
}
void
-cfs_read_lock(cfs_rwlock_t * rwlock)
+read_lock(rwlock_t *rwlock)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
slot->irql = KeRaiseIrqlToDpcLevel();
- while (TRUE) {
- cfs_spin_lock(&rwlock->guard);
- if (rwlock->count >= 0)
- break;
- cfs_spin_unlock(&rwlock->guard);
- }
+ while (TRUE) {
+ spin_lock(&rwlock->guard);
+ if (rwlock->count >= 0)
+ break;
+ spin_unlock(&rwlock->guard);
+ }
rwlock->count++;
- cfs_spin_unlock(&rwlock->guard);
+ spin_unlock(&rwlock->guard);
}
void
-cfs_read_unlock(cfs_rwlock_t * rwlock)
+read_unlock(rwlock_t *rwlock)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
slot = CONTAINING_RECORD(task, TASK_SLOT, task);
ASSERT(slot->Magic == TASKSLT_MAGIC);
-
- cfs_spin_lock(&rwlock->guard);
+
+ spin_lock(&rwlock->guard);
ASSERT(rwlock->count > 0);
- rwlock->count--;
- if (rwlock < 0) {
- cfs_enter_debugger();
- }
- cfs_spin_unlock(&rwlock->guard);
+ rwlock->count--;
+        if (rwlock->count < 0)
+ cfs_enter_debugger();
+ spin_unlock(&rwlock->guard);
- KeLowerIrql(slot->irql);
+ KeLowerIrql(slot->irql);
}
void
-cfs_write_lock(cfs_rwlock_t * rwlock)
+write_lock(rwlock_t *rwlock)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
slot->irql = KeRaiseIrqlToDpcLevel();
- while (TRUE) {
- cfs_spin_lock(&rwlock->guard);
- if (rwlock->count == 0)
- break;
- cfs_spin_unlock(&rwlock->guard);
- }
+ while (TRUE) {
+ spin_lock(&rwlock->guard);
+ if (rwlock->count == 0)
+ break;
+ spin_unlock(&rwlock->guard);
+ }
rwlock->count = -1;
- cfs_spin_unlock(&rwlock->guard);
+ spin_unlock(&rwlock->guard);
}
void
-cfs_write_unlock(cfs_rwlock_t * rwlock)
+write_unlock(rwlock_t *rwlock)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
slot = CONTAINING_RECORD(task, TASK_SLOT, task);
ASSERT(slot->Magic == TASKSLT_MAGIC);
-
- cfs_spin_lock(&rwlock->guard);
+
+ spin_lock(&rwlock->guard);
ASSERT(rwlock->count == -1);
- rwlock->count = 0;
- cfs_spin_unlock(&rwlock->guard);
+ rwlock->count = 0;
+ spin_unlock(&rwlock->guard);
- KeLowerIrql(slot->irql);
+ KeLowerIrql(slot->irql);
}
pg->addr = (void *)((__u64)addr & (~((__u64)PAGE_SIZE-1)));
pg->mapping = addr;
cfs_atomic_set(&pg->count, 1);
- cfs_set_bit(PG_virt, &(pg->flags));
+ set_bit(PG_virt, &(pg->flags));
cfs_enter_debugger();
return pg;
}
ASSERT(pg->addr != NULL);
ASSERT(cfs_atomic_read(&pg->count) <= 1);
- if (!cfs_test_bit(PG_virt, &pg->flags)) {
+ if (!test_bit(PG_virt, &pg->flags)) {
cfs_mem_cache_free(cfs_page_p_slab, pg->addr);
cfs_atomic_dec(&libcfs_total_pages);
} else {
ExFreeToNPagedLookasideList(&(kmc->npll), buf);
}
-cfs_spinlock_t shrinker_guard = {0};
+spinlock_t shrinker_guard = {0};
CFS_LIST_HEAD(shrinker_hdr);
cfs_timer_t shrinker_timer = {0};
{
struct cfs_shrinker * s = (struct cfs_shrinker *)
cfs_alloc(sizeof(struct cfs_shrinker), CFS_ALLOC_ZERO);
- if (s) {
- s->cb = cb;
- s->seeks = seeks;
- s->nr = 2;
- cfs_spin_lock(&shrinker_guard);
- cfs_list_add(&s->list, &shrinker_hdr);
- cfs_spin_unlock(&shrinker_guard);
- }
-
- return s;
+ if (s) {
+ s->cb = cb;
+ s->seeks = seeks;
+ s->nr = 2;
+ spin_lock(&shrinker_guard);
+ cfs_list_add(&s->list, &shrinker_hdr);
+ spin_unlock(&shrinker_guard);
+ }
+
+ return s;
}
void cfs_remove_shrinker(struct cfs_shrinker *s)
{
- struct cfs_shrinker *tmp;
- cfs_spin_lock(&shrinker_guard);
+ struct cfs_shrinker *tmp;
+ spin_lock(&shrinker_guard);
#if TRUE
cfs_list_for_each_entry_typed(tmp, &shrinker_hdr,
struct cfs_shrinker, list) {
#else
cfs_list_del(&s->list);
#endif
- cfs_spin_unlock(&shrinker_guard);
- cfs_free(s);
+ spin_unlock(&shrinker_guard);
+ cfs_free(s);
}
/* time ut test proc */
void shrinker_timer_proc(ulong_ptr_t arg)
{
- struct cfs_shrinker *s;
- cfs_spin_lock(&shrinker_guard);
-
- cfs_list_for_each_entry_typed(s, &shrinker_hdr,
- struct cfs_shrinker, list) {
- s->cb(s->nr, __GFP_FS);
- }
- cfs_spin_unlock(&shrinker_guard);
- cfs_timer_arm(&shrinker_timer, 300);
+ struct cfs_shrinker *s;
+ spin_lock(&shrinker_guard);
+
+ cfs_list_for_each_entry_typed(s, &shrinker_hdr,
+ struct cfs_shrinker, list) {
+ s->cb(s->nr, __GFP_FS);
+ }
+ spin_unlock(&shrinker_guard);
+ cfs_timer_arm(&shrinker_timer, 300);
}
int start_shrinker_timer()
*/
-static CFS_DECLARE_RWSEM(cfs_symbol_lock);
+static DECLARE_RWSEM(cfs_symbol_lock);
CFS_LIST_HEAD(cfs_symbol_list);
int libcfs_is_mp_system = FALSE;
cfs_list_t *walker;
struct cfs_symbol *sym = NULL;
- cfs_down_read(&cfs_symbol_lock);
+ down_read(&cfs_symbol_lock);
cfs_list_for_each(walker, &cfs_symbol_list) {
sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
if (!strcmp(sym->name, name)) {
break;
}
}
- cfs_up_read(&cfs_symbol_lock);
+ up_read(&cfs_symbol_lock);
if (sym != NULL)
return sym->value;
cfs_list_t *walker;
struct cfs_symbol *sym = NULL;
- cfs_down_read(&cfs_symbol_lock);
+ down_read(&cfs_symbol_lock);
cfs_list_for_each(walker, &cfs_symbol_list) {
sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
if (!strcmp(sym->name, name)) {
break;
}
}
- cfs_up_read(&cfs_symbol_lock);
+ up_read(&cfs_symbol_lock);
LASSERT(sym != NULL);
}
new->ref = 0;
CFS_INIT_LIST_HEAD(&new->sym_list);
- cfs_down_write(&cfs_symbol_lock);
- cfs_list_for_each(walker, &cfs_symbol_list) {
- sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
- if (!strcmp(sym->name, name)) {
- cfs_up_write(&cfs_symbol_lock);
- cfs_free(new);
- return 0; // alreay registerred
- }
- }
- cfs_list_add_tail(&new->sym_list, &cfs_symbol_list);
- cfs_up_write(&cfs_symbol_lock);
+ down_write(&cfs_symbol_lock);
+ cfs_list_for_each(walker, &cfs_symbol_list) {
+ sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
+ if (!strcmp(sym->name, name)) {
+ up_write(&cfs_symbol_lock);
+ cfs_free(new);
+                        return 0; /* already registered */
+ }
+ }
+ cfs_list_add_tail(&new->sym_list, &cfs_symbol_list);
+ up_write(&cfs_symbol_lock);
return 0;
}
cfs_list_t *nxt;
struct cfs_symbol *sym = NULL;
- cfs_down_write(&cfs_symbol_lock);
+ down_write(&cfs_symbol_lock);
cfs_list_for_each_safe(walker, nxt, &cfs_symbol_list) {
sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
if (!strcmp(sym->name, name)) {
break;
}
}
- cfs_up_write(&cfs_symbol_lock);
+ up_write(&cfs_symbol_lock);
}
/*
cfs_list_t *walker;
struct cfs_symbol *sym = NULL;
- cfs_down_write(&cfs_symbol_lock);
- cfs_list_for_each(walker, &cfs_symbol_list) {
- sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
- LASSERT(sym->ref == 0);
- cfs_list_del (&sym->sym_list);
- cfs_free(sym);
- }
- cfs_up_write(&cfs_symbol_lock);
- return;
+ down_write(&cfs_symbol_lock);
+ cfs_list_for_each(walker, &cfs_symbol_list) {
+ sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
+ LASSERT(sym->ref == 0);
+ cfs_list_del (&sym->sym_list);
+ cfs_free(sym);
+ }
+ up_write(&cfs_symbol_lock);
+ return;
}
int
libcfs_arch_init(void)
{
- int rc;
+ int rc;
+ spinlock_t lock;
- cfs_spinlock_t lock;
- /* Workground to check the system is MP build or UP build */
- cfs_spin_lock_init(&lock);
- cfs_spin_lock(&lock);
- libcfs_is_mp_system = (int)lock.lock;
- /* MP build system: it's a real spin, for UP build system, it
- only raises the IRQL to DISPATCH_LEVEL */
- cfs_spin_unlock(&lock);
+        /* Workaround to check whether the system is an MP or UP build */
+ spin_lock_init(&lock);
+ spin_lock(&lock);
+ libcfs_is_mp_system = (int)lock.lock;
+        /* On an MP build system this is a real spin; on a UP build system it
+         * only raises the IRQL to DISPATCH_LEVEL */
+ spin_unlock(&lock);
        /* initialize libc routines (conflict between libcnptr.lib
and kernel ntoskrnl.lib) */
/* The global lock to protect all the access */
#if LIBCFS_PROCFS_SPINLOCK
-cfs_spinlock_t proc_fs_lock;
+spinlock_t proc_fs_lock;
-#define INIT_PROCFS_LOCK() cfs_spin_lock_init(&proc_fs_lock)
-#define LOCK_PROCFS() cfs_spin_lock(&proc_fs_lock)
-#define UNLOCK_PROCFS() cfs_spin_unlock(&proc_fs_lock)
+#define INIT_PROCFS_LOCK() spin_lock_init(&proc_fs_lock)
+#define LOCK_PROCFS() spin_lock(&proc_fs_lock)
+#define UNLOCK_PROCFS() spin_unlock(&proc_fs_lock)
#else
-cfs_mutex_t proc_fs_lock;
+struct mutex proc_fs_lock;
#define INIT_PROCFS_LOCK() cfs_init_mutex(&proc_fs_lock)
#define LOCK_PROCFS() cfs_mutex_down(&proc_fs_lock)
file->private_data = p;
}
memset(p, 0, sizeof(*p));
- cfs_mutex_init(&p->lock);
+ mutex_init(&p->lock);
p->op = op;
/*
void *p;
int err = 0;
- cfs_mutex_lock(&m->lock);
+ mutex_lock(&m->lock);
/*
* seq_file->op->..m_start/m_stop/m_next may do special actions
* or optimisations based on the file->f_version, so we want to
else
*ppos += copied;
file->f_version = m->version;
- cfs_mutex_unlock(&m->lock);
+ mutex_unlock(&m->lock);
return copied;
Enomem:
err = -ENOMEM;
struct seq_file *m = (struct seq_file *)file->private_data;
long long retval = -EINVAL;
- cfs_mutex_lock(&m->lock);
+ mutex_lock(&m->lock);
m->version = file->f_version;
switch (origin) {
case 1:
}
}
file->f_version = m->version;
- cfs_mutex_unlock(&m->lock);
+ mutex_unlock(&m->lock);
return retval;
}
EXPORT_SYMBOL(seq_lseek);
waitq->magic = CFS_WAITQ_MAGIC;
waitq->flags = 0;
CFS_INIT_LIST_HEAD(&(waitq->waiters));
- cfs_spin_lock_init(&(waitq->guard));
+ spin_lock_init(&(waitq->guard));
}
/*
LASSERT(link->magic == CFS_WAITLINK_MAGIC);
LASSERT(waitqid < CFS_WAITQ_CHANNELS);
- cfs_spin_lock(&(waitq->guard));
+ spin_lock(&(waitq->guard));
LASSERT(link->waitq[waitqid].waitq == NULL);
link->waitq[waitqid].waitq = waitq;
if (link->flags & CFS_WAITQ_EXCLUSIVE) {
} else {
cfs_list_add(&link->waitq[waitqid].link, &waitq->waiters);
}
- cfs_spin_unlock(&(waitq->guard));
+ spin_unlock(&(waitq->guard));
}
/*
* cfs_waitq_add
LASSERT(waitq->magic == CFS_WAITQ_MAGIC);
LASSERT(link->magic == CFS_WAITLINK_MAGIC);
- cfs_spin_lock(&(waitq->guard));
+ spin_lock(&(waitq->guard));
for (i=0; i < CFS_WAITQ_CHANNELS; i++) {
if (link->waitq[i].waitq == waitq)
cfs_enter_debugger();
}
- cfs_spin_unlock(&(waitq->guard));
+ spin_unlock(&(waitq->guard));
}
/*
LASSERT(waitq != NULL);
LASSERT(waitq->magic == CFS_WAITQ_MAGIC);
- cfs_spin_lock(&waitq->guard);
+ spin_lock(&waitq->guard);
cfs_list_for_each_entry_typed(scan, &waitq->waiters,
cfs_waitlink_channel_t,
link) {
break;
}
- cfs_spin_unlock(&waitq->guard);
- return;
+ spin_unlock(&waitq->guard);
+ return;
}
/*
{
PKS_TSDU KsTsdu = NULL;
- cfs_spin_lock(&(ks_data.ksnd_tsdu_lock));
+ spin_lock(&(ks_data.ksnd_tsdu_lock));
if (!cfs_list_empty (&(ks_data.ksnd_freetsdus))) {
ks_data.ksnd_tsdu_slab, 0);
}
- cfs_spin_unlock(&(ks_data.ksnd_tsdu_lock));
+ spin_unlock(&(ks_data.ksnd_tsdu_lock));
if (NULL != KsTsdu) {
RtlZeroMemory(KsTsdu, ks_data.ksnd_tsdu_size);
PKS_TSDU KsTsdu
)
{
- cfs_spin_lock(&(ks_data.ksnd_tsdu_lock));
- if (ks_data.ksnd_nfreetsdus > 128) {
- KsFreeKsTsdu(KsTsdu);
- } else {
- cfs_list_add_tail( &(KsTsdu->Link), &(ks_data.ksnd_freetsdus));
- ks_data.ksnd_nfreetsdus++;
- }
- cfs_spin_unlock(&(ks_data.ksnd_tsdu_lock));
+ spin_lock(&(ks_data.ksnd_tsdu_lock));
+ if (ks_data.ksnd_nfreetsdus > 128) {
+ KsFreeKsTsdu(KsTsdu);
+ } else {
+ cfs_list_add_tail( &(KsTsdu->Link), &(ks_data.ksnd_freetsdus));
+ ks_data.ksnd_nfreetsdus++;
+ }
+ spin_unlock(&(ks_data.ksnd_tsdu_lock));
}
/* with tconn lock acquired */
TsduMgr->NumOfTsdu = 0;
TsduMgr->TotalBytes = 0;
- cfs_spin_lock_init(&TsduMgr->Lock);
+ spin_lock_init(&TsduMgr->Lock);
}
LASSERT(child->kstc_type == kstt_child);
- cfs_spin_lock(&(child->kstc_lock));
+ spin_lock(&(child->kstc_lock));
LASSERT(parent->kstc_state == ksts_listening);
LASSERT(child->kstc_state == ksts_connecting);
FALSE
);
- cfs_spin_unlock(&(child->kstc_lock));
+ spin_unlock(&(child->kstc_lock));
KsPrint((2, "KsAcceptCompletionRoutine: singal parent: %p (child: %p)\n",
parent, child));
child->child.kstc_busy = FALSE;
child->kstc_state = ksts_associated;
- cfs_spin_unlock(&(child->kstc_lock));
+ spin_unlock(&(child->kstc_lock));
}
/* now free the Irp */
ks_addr_slot_t * slot = NULL;
PLIST_ENTRY list = NULL;
- cfs_spin_lock(&ks_data.ksnd_addrs_lock);
+ spin_lock(&ks_data.ksnd_addrs_lock);
list = ks_data.ksnd_addrs_list.Flink;
while (list != &ks_data.ksnd_addrs_list) {
slot = NULL;
}
- cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
+ spin_unlock(&ks_data.ksnd_addrs_lock);
- return slot;
+ return slot;
}
void
KsCleanupIpAddresses()
{
- cfs_spin_lock(&ks_data.ksnd_addrs_lock);
+ spin_lock(&ks_data.ksnd_addrs_lock);
while (!IsListEmpty(&ks_data.ksnd_addrs_list)) {
}
cfs_assert(ks_data.ksnd_naddrs == 0);
- cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
+ spin_unlock(&ks_data.ksnd_addrs_lock);
}
VOID
slot = cfs_alloc(sizeof(ks_addr_slot_t) + DeviceName->Length, CFS_ALLOC_ZERO);
if (slot != NULL) {
- cfs_spin_lock(&ks_data.ksnd_addrs_lock);
+ spin_lock(&ks_data.ksnd_addrs_lock);
InsertTailList(&ks_data.ksnd_addrs_list, &slot->link);
sprintf(slot->iface, "eth%d", ks_data.ksnd_naddrs++);
slot->ip_addr = ntohl(IpAddress->in_addr);
slot->devname.Length = DeviceName->Length;
slot->devname.MaximumLength = DeviceName->Length + sizeof(WCHAR);
slot->devname.Buffer = slot->buffer;
- cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
+ spin_unlock(&ks_data.ksnd_addrs_lock);
KsPrint((0, "KsAddAddressHandle: %s added: ip=%xh(%d.%d.%d.%d)\n",
slot->iface, IpAddress->in_addr,
/* initialize the global ks_data members */
RtlInitUnicodeString(&ks_data.ksnd_client_name, TDILND_MODULE_NAME);
- cfs_spin_lock_init(&ks_data.ksnd_addrs_lock);
+ spin_lock_init(&ks_data.ksnd_addrs_lock);
InitializeListHead(&ks_data.ksnd_addrs_list);
/* register the pnp handlers */
cfs_list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
child = cfs_list_entry (tmp, ks_tconn_t, child.kstc_link);
- cfs_spin_lock(&(child->kstc_lock));
+ spin_lock(&(child->kstc_lock));
if (!child->child.kstc_busy) {
LASSERT(child->kstc_state == ksts_associated);
child->child.kstc_busy = TRUE;
- cfs_spin_unlock(&(child->kstc_lock));
+ spin_unlock(&(child->kstc_lock));
break;
} else {
- cfs_spin_unlock(&(child->kstc_lock));
+ spin_unlock(&(child->kstc_lock));
child = NULL;
}
}
LASSERT(parent->kstc_type == kstt_listener);
- cfs_spin_lock(&(parent->kstc_lock));
+ spin_lock(&(parent->kstc_lock));
if (parent->kstc_state == ksts_listening) {
if (child) {
- cfs_spin_lock(&(child->kstc_lock));
+ spin_lock(&(child->kstc_lock));
child->child.kstc_info.ConnectionInfo = ConnectionInfo;
child->child.kstc_info.Remote = ConnectionInfo->RemoteAddress;
child->kstc_state = ksts_connecting;
- cfs_spin_unlock(&(child->kstc_lock));
+ spin_unlock(&(child->kstc_lock));
} else {
goto errorout;
}
- cfs_spin_unlock(&(parent->kstc_lock));
+ spin_unlock(&(parent->kstc_lock));
return Status;
errorout:
- cfs_spin_unlock(&(parent->kstc_lock));
+ spin_unlock(&(parent->kstc_lock));
*AcceptIrp = NULL;
*ConnectionContext = NULL;
KeSetEvent(&(WorkItem->Event), 0, FALSE);
- cfs_spin_lock(&(tconn->kstc_lock));
- cfs_clear_flag(tconn->kstc_flags, KS_TCONN_DISCONNECT_BUSY);
- cfs_spin_unlock(&(tconn->kstc_lock));
- ks_put_tconn(tconn);
+ spin_lock(&(tconn->kstc_lock));
+ cfs_clear_flag(tconn->kstc_flags, KS_TCONN_DISCONNECT_BUSY);
+ spin_unlock(&(tconn->kstc_lock));
+ ks_put_tconn(tconn);
}
tconn, DisconnectFlags));
ks_get_tconn(tconn);
- cfs_spin_lock(&(tconn->kstc_lock));
+ spin_lock(&(tconn->kstc_lock));
WorkItem = &(tconn->kstc_disconnect);
}
}
- cfs_spin_unlock(&(tconn->kstc_lock));
+ spin_unlock(&(tconn->kstc_lock));
ks_put_tconn(tconn);
return (Status);
tconn
);
- cfs_spin_lock_init(&(tconn->kstc_lock));
+ spin_lock_init(&(tconn->kstc_lock));
ks_get_tconn(tconn);
- cfs_spin_lock(&(ks_data.ksnd_tconn_lock));
+ spin_lock(&(ks_data.ksnd_tconn_lock));
/* attach it into global list in ks_data */
cfs_list_add(&(tconn->kstc_list), &(ks_data.ksnd_tconns));
ks_data.ksnd_ntconns++;
- cfs_spin_unlock(&(ks_data.ksnd_tconn_lock));
+ spin_unlock(&(ks_data.ksnd_tconn_lock));
tconn->kstc_rcv_wnd = tconn->kstc_snd_wnd = 0x10000;
}
{
LASSERT(cfs_atomic_read(&(tconn->kstc_refcount)) == 0);
- cfs_spin_lock(&(ks_data.ksnd_tconn_lock));
+ spin_lock(&(ks_data.ksnd_tconn_lock));
/* remove it from the global list */
cfs_list_del(&tconn->kstc_list);
if (ks_data.ksnd_ntconns == 0) {
cfs_wake_event(&ks_data.ksnd_tconn_exit);
}
- cfs_spin_unlock(&(ks_data.ksnd_tconn_lock));
+ spin_unlock(&(ks_data.ksnd_tconn_lock));
/* free the structure memory */
cfs_mem_cache_free(ks_data.ksnd_tconn_slab, tconn);
{
if (cfs_atomic_dec_and_test(&(tconn->kstc_refcount))) {
- cfs_spin_lock(&(tconn->kstc_lock));
+ spin_lock(&(tconn->kstc_lock));
if ( ( tconn->kstc_type == kstt_child ||
tconn->kstc_type == kstt_sender ) &&
( tconn->kstc_state == ksts_connected ) ) {
- cfs_spin_unlock(&(tconn->kstc_lock));
+ spin_unlock(&(tconn->kstc_lock));
ks_abort_tconn(tconn);
cfs_set_flag(tconn->kstc_flags, KS_TCONN_DESTROY_BUSY);
}
- cfs_spin_unlock(&(tconn->kstc_lock));
+ spin_unlock(&(tconn->kstc_lock));
}
}
}
tconn->kstc_addr.FileObject
);
- cfs_spin_lock(&tconn->child.kstc_parent->kstc_lock);
- cfs_spin_lock(&tconn->kstc_lock);
+ spin_lock(&tconn->child.kstc_parent->kstc_lock);
+ spin_lock(&tconn->kstc_lock);
tconn->kstc_state = ksts_inited;
tconn->child.kstc_queued = FALSE;
}
- cfs_spin_unlock(&tconn->kstc_lock);
- cfs_spin_unlock(&tconn->child.kstc_parent->kstc_lock);
+ spin_unlock(&tconn->kstc_lock);
+ spin_unlock(&tconn->child.kstc_parent->kstc_lock);
/* drop the reference of the parent tconn */
ks_put_tconn(tconn->child.kstc_parent);
NULL
);
- cfs_spin_lock(&(tconn->kstc_lock));
+ spin_lock(&(tconn->kstc_lock));
if (NT_SUCCESS(status)) {
tconn->sender.kstc_info.ConnectionInfo = ConnectionInfo;
tconn->sender.kstc_info.Remote = ConnectionInfo->RemoteAddress;
- cfs_spin_unlock(&(tconn->kstc_lock));
+ spin_unlock(&(tconn->kstc_lock));
} else {
rc = cfs_error_code(status);
tconn->kstc_state = ksts_associated;
- cfs_spin_unlock(&(tconn->kstc_lock));
+ spin_unlock(&(tconn->kstc_lock));
        /* disassociate the connection and the address object,
after cleanup, it's safe to set the state to abort ... */
cfs_enter_debugger();
}
- cfs_spin_lock(&(tconn->kstc_lock));
+ spin_lock(&(tconn->kstc_lock));
/* cleanup the tsdumgr Lists */
KsCleanupTsdu (tconn);
info->ConnectionInfo = NULL;
info->Remote = NULL;
- cfs_spin_unlock(&(tconn->kstc_lock));
+ spin_unlock(&(tconn->kstc_lock));
}
status = STATUS_SUCCESS;
WorkItem = &(tconn->kstc_disconnect);
ks_get_tconn(tconn);
- cfs_spin_lock(&(tconn->kstc_lock));
+ spin_lock(&(tconn->kstc_lock));
if (tconn->kstc_state != ksts_connected) {
ks_put_tconn(tconn);
}
}
- cfs_spin_unlock(&(tconn->kstc_lock));
+ spin_unlock(&(tconn->kstc_lock));
}
engs = &TsduMgr->Slot;
if (!engs->queued) {
- cfs_spin_lock(&engm->lock);
+ spin_lock(&engm->lock);
if (!engs->queued) {
cfs_list_add_tail(&engs->link, &engm->list);
engs->queued = TRUE;
engs->tsdumgr = TsduMgr;
KeSetEvent(&(engm->start),0, FALSE);
}
- cfs_spin_unlock(&engm->lock);
+ spin_unlock(&engm->lock);
KsPrint((4, "KsQueueTdiEngine: TsduMgr=%p is queued to engine %p\n",
TsduMgr, engm));
}
if (engs->queued) {
engm = engs->emgr;
LASSERT(engm != NULL);
- cfs_spin_lock(&engm->lock);
+ spin_lock(&engm->lock);
if (engs->queued) {
cfs_list_del(&engs->link);
engs->queued = FALSE;
engs->emgr = NULL;
engs->tsdumgr = NULL;
}
- cfs_spin_unlock(&engm->lock);
+ spin_unlock(&engm->lock);
KsPrint((4, "KsQueueTdiEngine: TsduMgr %p is removed from engine %p\n",
TsduMgr, engm));
}
cfs_wait_event_internal(&engm->start, 0);
- cfs_spin_lock(&engm->lock);
+ spin_lock(&engm->lock);
if (cfs_list_empty(&engm->list)) {
- cfs_spin_unlock(&engm->lock);
+ spin_unlock(&engm->lock);
continue;
}
LASSERT(engs->queued);
engs->emgr = NULL;
engs->queued = FALSE;
- cfs_spin_unlock(&engm->lock);
+ spin_unlock(&engm->lock);
tconn = engs->tconn;
LASSERT(tconn->kstc_magic == KS_TCONN_MAGIC);
/* initialize tconn related globals */
RtlZeroMemory(&ks_data, sizeof(ks_tdi_data_t));
- cfs_spin_lock_init(&ks_data.ksnd_tconn_lock);
+ spin_lock_init(&ks_data.ksnd_tconn_lock);
CFS_INIT_LIST_HEAD(&ks_data.ksnd_tconns);
cfs_init_event(&ks_data.ksnd_tconn_exit, TRUE, FALSE);
}
/* initialize tsdu related globals */
- cfs_spin_lock_init(&ks_data.ksnd_tsdu_lock);
+ spin_lock_init(&ks_data.ksnd_tsdu_lock);
CFS_INIT_LIST_HEAD(&ks_data.ksnd_freetsdus);
ks_data.ksnd_tsdu_size = TDINAL_TSDU_DEFAULT_SIZE; /* 64k */
ks_data.ksnd_tsdu_slab = cfs_mem_cache_create(
goto errorout;
}
for (i = 0; i < ks_data.ksnd_engine_nums; i++) {
- cfs_spin_lock_init(&ks_data.ksnd_engine_mgr[i].lock);
+ spin_lock_init(&ks_data.ksnd_engine_mgr[i].lock);
cfs_init_event(&ks_data.ksnd_engine_mgr[i].start, TRUE, FALSE);
cfs_init_event(&ks_data.ksnd_engine_mgr[i].exit, TRUE, FALSE);
CFS_INIT_LIST_HEAD(&ks_data.ksnd_engine_mgr[i].list);
}
    /* we need to wait until all the tconns are freed */
- cfs_spin_lock(&(ks_data.ksnd_tconn_lock));
+ spin_lock(&(ks_data.ksnd_tconn_lock));
if (cfs_list_empty(&(ks_data.ksnd_tconns))) {
cfs_wake_event(&ks_data.ksnd_tconn_exit);
}
- cfs_spin_unlock(&(ks_data.ksnd_tconn_lock));
+ spin_unlock(&(ks_data.ksnd_tconn_lock));
/* now wait on the tconn exit event */
cfs_wait_event_internal(&ks_data.ksnd_tconn_exit, 0);
ks_data.ksnd_tconn_slab = NULL;
/* clean up all the tsud buffers in the free list */
- cfs_spin_lock(&(ks_data.ksnd_tsdu_lock));
+ spin_lock(&(ks_data.ksnd_tsdu_lock));
cfs_list_for_each (list, &ks_data.ksnd_freetsdus) {
KsTsdu = cfs_list_entry (list, KS_TSDU, Link);
ks_data.ksnd_tsdu_slab,
KsTsdu );
}
- cfs_spin_unlock(&(ks_data.ksnd_tsdu_lock));
+ spin_unlock(&(ks_data.ksnd_tsdu_lock));
/* it's safe to delete the tsdu slab ... */
cfs_mem_cache_destroy(ks_data.ksnd_tsdu_slab);
/* create the backlog child tconn */
backlog = ks_create_child_tconn(parent);
- cfs_spin_lock(&(parent->kstc_lock));
+ spin_lock(&(parent->kstc_lock));
if (backlog) {
- cfs_spin_lock(&backlog->kstc_lock);
+ spin_lock(&backlog->kstc_lock);
                /* attach it into the listening list of the daemon */
cfs_list_add( &backlog->child.kstc_link,
&parent->listener.kstc_listening.list );
parent->listener.kstc_listening.num++;
backlog->child.kstc_queued = TRUE;
- cfs_spin_unlock(&backlog->kstc_lock);
+ spin_unlock(&backlog->kstc_lock);
} else {
cfs_enter_debugger();
}
- cfs_spin_unlock(&(parent->kstc_lock));
+ spin_unlock(&(parent->kstc_lock));
}
}
return rc;
}
- cfs_spin_lock(&(tconn->kstc_lock));
- tconn->listener.nbacklog = nbacklog;
- tconn->kstc_state = ksts_listening;
- cfs_set_flag(tconn->kstc_flags, KS_TCONN_DAEMON_STARTED);
- cfs_spin_unlock(&(tconn->kstc_lock));
+ spin_lock(&(tconn->kstc_lock));
+ tconn->listener.nbacklog = nbacklog;
+ tconn->kstc_state = ksts_listening;
+ cfs_set_flag(tconn->kstc_flags, KS_TCONN_DAEMON_STARTED);
+ spin_unlock(&(tconn->kstc_lock));
- return rc;
+ return rc;
}
void
/* reset all tdi event callbacks to NULL */
KsResetHandlers (tconn);
- cfs_spin_lock(&tconn->kstc_lock);
+ spin_lock(&tconn->kstc_lock);
cfs_clear_flag(tconn->kstc_flags, KS_TCONN_DAEMON_STARTED);
ks_put_tconn(backlog);
}
- cfs_spin_unlock(&tconn->kstc_lock);
+ spin_unlock(&tconn->kstc_lock);
/* wake up it from the waiting on new incoming connections */
KeSetEvent(&tconn->listener.kstc_accept_event, 0, FALSE);
ks_replenish_backlogs(parent, parent->listener.nbacklog);
- cfs_spin_lock(&(parent->kstc_lock));
+ spin_lock(&(parent->kstc_lock));
- if (parent->listener.kstc_listening.num <= 0) {
- cfs_spin_unlock(&(parent->kstc_lock));
+ if (parent->listener.kstc_listening.num <= 0) {
+ spin_unlock(&(parent->kstc_lock));
return -1;
}
cfs_list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
backlog = cfs_list_entry (tmp, ks_tconn_t, child.kstc_link);
- cfs_spin_lock(&(backlog->kstc_lock));
+ spin_lock(&(backlog->kstc_lock));
if (backlog->child.kstc_accepted) {
parent->listener.kstc_listening.num--;
backlog->child.kstc_queueno = 1;
- cfs_spin_unlock(&(backlog->kstc_lock));
+ spin_unlock(&(backlog->kstc_lock));
break;
} else {
- cfs_spin_unlock(&(backlog->kstc_lock));
+ spin_unlock(&(backlog->kstc_lock));
backlog = NULL;
}
}
- cfs_spin_unlock(&(parent->kstc_lock));
+ spin_unlock(&(parent->kstc_lock));
        /* we need to wait until new incoming connections are requested
           or the case of shutting down the listening daemon thread */
NULL
);
- cfs_spin_lock(&(parent->kstc_lock));
+ spin_lock(&(parent->kstc_lock));
        /* check whether it's expected to exit? */
if (!cfs_is_flag_set(parent->kstc_flags, KS_TCONN_DAEMON_STARTED)) {
- cfs_spin_unlock(&(parent->kstc_lock));
+ spin_unlock(&(parent->kstc_lock));
} else {
goto again;
}
ks_addr_slot_t * slot = NULL;
PLIST_ENTRY list = NULL;
- cfs_spin_lock(&ks_data.ksnd_addrs_lock);
+ spin_lock(&ks_data.ksnd_addrs_lock);
list = ks_data.ksnd_addrs_list.Flink;
while (list != &ks_data.ksnd_addrs_list) {
slot = NULL;
}
- cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
+ spin_unlock(&ks_data.ksnd_addrs_lock);
return (int)(slot == NULL);
}
PLIST_ENTRY list = NULL;
int nips = 0;
- cfs_spin_lock(&ks_data.ksnd_addrs_lock);
+ spin_lock(&ks_data.ksnd_addrs_lock);
*names = cfs_alloc(sizeof(char *) * ks_data.ksnd_naddrs, CFS_ALLOC_ZERO);
if (*names == NULL) {
errorout:
- cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
+ spin_unlock(&ks_data.ksnd_addrs_lock);
return nips;
}
{
LASSERT(sock->kstc_type == kstt_listener);
- cfs_spin_lock(&(sock->kstc_lock));
+ spin_lock(&(sock->kstc_lock));
/* clear the daemon flag */
cfs_clear_flag(sock->kstc_flags, KS_TCONN_DAEMON_STARTED);
/* wake up it from the waiting on new incoming connections */
KeSetEvent(&sock->listener.kstc_accept_event, 0, FALSE);
- cfs_spin_unlock(&(sock->kstc_lock));
+ spin_unlock(&(sock->kstc_lock));
}
/*
{
PTRANSPORT_ADDRESS taddr = NULL;
- cfs_spin_lock(&socket->kstc_lock);
+ spin_lock(&socket->kstc_lock);
if (remote) {
if (socket->kstc_type == kstt_sender) {
taddr = socket->sender.kstc_info.Remote;
if (port != NULL)
*port = ntohs (addr->sin_port);
} else {
- cfs_spin_unlock(&socket->kstc_lock);
- return -ENOTCONN;
- }
+ spin_unlock(&socket->kstc_lock);
+ return -ENOTCONN;
+ }
- cfs_spin_unlock(&socket->kstc_lock);
- return 0;
+ spin_unlock(&socket->kstc_lock);
+ return 0;
}
int libcfs_sock_write(struct socket *sock, void *buffer, int nob, int timeout)
char *cfs_trace_console_buffers[CFS_NR_CPUS][CFS_TCD_TYPE_MAX];
-cfs_rw_semaphore_t cfs_tracefile_sem;
+struct rw_semaphore cfs_tracefile_sem;
int cfs_tracefile_init_arch()
{
int j;
struct cfs_trace_cpu_data *tcd;
- cfs_init_rwsem(&cfs_tracefile_sem);
+ init_rwsem(&cfs_tracefile_sem);
/* initialize trace_data */
memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
cfs_trace_data[i] = NULL;
}
- cfs_fini_rwsem(&cfs_tracefile_sem);
+ fini_rwsem(&cfs_tracefile_sem);
}
void cfs_tracefile_read_lock()
{
- cfs_down_read(&cfs_tracefile_sem);
+ down_read(&cfs_tracefile_sem);
}
void cfs_tracefile_read_unlock()
{
- cfs_up_read(&cfs_tracefile_sem);
+ up_read(&cfs_tracefile_sem);
}
void cfs_tracefile_write_lock()
{
- cfs_down_write(&cfs_tracefile_sem);
+ down_write(&cfs_tracefile_sem);
}
void cfs_tracefile_write_unlock()
{
- cfs_up_write(&cfs_tracefile_sem);
+ up_write(&cfs_tracefile_sem);
}
cfs_trace_buf_type_t cfs_trace_buf_idx_get()
cfs_list_t ws_list; /* chain on global list */
#ifdef __KERNEL__
/** serialised workitems */
- cfs_spinlock_t ws_lock;
+ spinlock_t ws_lock;
/** where schedulers sleep */
cfs_waitq_t ws_waitq;
#endif
struct cfs_workitem_data {
/** serialize */
- cfs_spinlock_t wi_glock;
+ spinlock_t wi_glock;
/** list of all schedulers */
cfs_list_t wi_scheds;
/** WI module is initialized */
static inline void
cfs_wi_sched_lock(cfs_wi_sched_t *sched)
{
- cfs_spin_lock(&sched->ws_lock);
+ spin_lock(&sched->ws_lock);
}
static inline void
cfs_wi_sched_unlock(cfs_wi_sched_t *sched)
{
- cfs_spin_unlock(&sched->ws_lock);
+ spin_unlock(&sched->ws_lock);
}
static inline int
static inline void
cfs_wi_sched_lock(cfs_wi_sched_t *sched)
{
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
}
static inline void
cfs_wi_sched_unlock(cfs_wi_sched_t *sched)
{
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
}
#endif /* __KERNEL__ */
if (sched->ws_cptab != NULL)
cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
LASSERT(sched->ws_starting == 1);
sched->ws_starting--;
sched->ws_nthreads++;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
cfs_wi_sched_lock(sched);
cfs_wi_sched_unlock(sched);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
sched->ws_nthreads--;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
- return 0;
+ return 0;
}
#else /* __KERNEL__ */
int
cfs_wi_check_events (void)
{
- int n = 0;
- cfs_workitem_t *wi;
+ int n = 0;
+ cfs_workitem_t *wi;
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
- for (;;) {
+ for (;;) {
struct cfs_wi_sched *sched = NULL;
struct cfs_wi_sched *tmp;
LASSERT(sched->ws_nscheduled > 0);
sched->ws_nscheduled--;
- LASSERT (wi->wi_scheduled);
- wi->wi_scheduled = 0;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ LASSERT(wi->wi_scheduled);
+ wi->wi_scheduled = 0;
+ spin_unlock(&cfs_wi_data.wi_glock);
- n++;
- (*wi->wi_action) (wi);
+ n++;
+ (*wi->wi_action) (wi);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
- }
+ spin_lock(&cfs_wi_data.wi_glock);
+ }
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
- return n;
+ spin_unlock(&cfs_wi_data.wi_glock);
+ return n;
}
#endif
LASSERT(cfs_wi_data.wi_init);
LASSERT(!cfs_wi_data.wi_stopping);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
if (sched->ws_stopping) {
CDEBUG(D_INFO, "%s is in progress of stopping\n",
sched->ws_name);
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
return;
}
LASSERT(!cfs_list_empty(&sched->ws_list));
sched->ws_stopping = 1;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
i = 2;
#ifdef __KERNEL__
cfs_waitq_broadcast(&sched->ws_waitq);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
while (sched->ws_nthreads > 0) {
CDEBUG(IS_PO2(++i) ? D_WARNING : D_NET,
"waiting for %d threads of WI sched[%s] to terminate\n",
sched->ws_nthreads, sched->ws_name);
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
cfs_pause(cfs_time_seconds(1) / 20);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
}
cfs_list_del(&sched->ws_list);
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
#else
SET_BUT_UNUSED(i);
#endif
sched->ws_cpt = cpt;
#ifdef __KERNEL__
- cfs_spin_lock_init(&sched->ws_lock);
+ spin_lock_init(&sched->ws_lock);
cfs_waitq_init(&sched->ws_waitq);
#endif
CFS_INIT_LIST_HEAD(&sched->ws_runq);
rc = 0;
#ifdef __KERNEL__
while (nthrs > 0) {
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
while (sched->ws_starting > 0) {
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
cfs_schedule();
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
}
sched->ws_starting++;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
rc = cfs_create_thread(cfs_wi_scheduler, sched, 0);
if (rc >= 0) {
CERROR("Failed to create thread for WI scheduler %s: %d\n",
name, rc);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
/* make up for cfs_wi_sched_destroy */
cfs_list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
sched->ws_starting--;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
cfs_wi_sched_destroy(sched);
return rc;
#else
SET_BUT_UNUSED(rc);
#endif
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
cfs_list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
*sched_pp = sched;
return 0;
{
memset(&cfs_wi_data, 0, sizeof(cfs_wi_data));
- cfs_spin_lock_init(&cfs_wi_data.wi_glock);
+ spin_lock_init(&cfs_wi_data.wi_glock);
CFS_INIT_LIST_HEAD(&cfs_wi_data.wi_scheds);
cfs_wi_data.wi_init = 1;
{
struct cfs_wi_sched *sched;
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
cfs_wi_data.wi_stopping = 1;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
#ifdef __KERNEL__
/* nobody should contend on this list */
}
cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
while (sched->ws_nthreads != 0) {
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
cfs_pause(cfs_time_seconds(1) / 20);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
}
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
}
#endif
while (!cfs_list_empty(&cfs_wi_data.wi_scheds)) {
#ifdef __KERNEL__
-#define lnet_ptl_lock(ptl) cfs_spin_lock(&(ptl)->ptl_lock)
-#define lnet_ptl_unlock(ptl) cfs_spin_unlock(&(ptl)->ptl_lock)
-#define lnet_eq_wait_lock() cfs_spin_lock(&the_lnet.ln_eq_wait_lock)
-#define lnet_eq_wait_unlock() cfs_spin_unlock(&the_lnet.ln_eq_wait_lock)
-#define lnet_ni_lock(ni) cfs_spin_lock(&(ni)->ni_lock)
-#define lnet_ni_unlock(ni) cfs_spin_unlock(&(ni)->ni_lock)
-#define LNET_MUTEX_LOCK(m) cfs_mutex_lock(m)
-#define LNET_MUTEX_UNLOCK(m) cfs_mutex_unlock(m)
+#define lnet_ptl_lock(ptl) spin_lock(&(ptl)->ptl_lock)
+#define lnet_ptl_unlock(ptl) spin_unlock(&(ptl)->ptl_lock)
+#define lnet_eq_wait_lock() spin_lock(&the_lnet.ln_eq_wait_lock)
+#define lnet_eq_wait_unlock() spin_unlock(&the_lnet.ln_eq_wait_lock)
+#define lnet_ni_lock(ni) spin_lock(&(ni)->ni_lock)
+#define lnet_ni_unlock(ni) spin_unlock(&(ni)->ni_lock)
+#define LNET_MUTEX_LOCK(m) mutex_lock(m)
+#define LNET_MUTEX_UNLOCK(m) mutex_unlock(m)
#else /* !__KERNEL__ */
typedef struct lnet_ni {
#ifdef __KERNEL__
- cfs_spinlock_t ni_lock;
+ spinlock_t ni_lock;
#else
# ifndef HAVE_LIBPTHREAD
int ni_lock;
typedef struct lnet_portal {
#ifdef __KERNEL__
- cfs_spinlock_t ptl_lock;
+ spinlock_t ptl_lock;
#else
# ifndef HAVE_LIBPTHREAD
int ptl_lock;
struct lnet_res_container ln_eq_container;
#ifdef __KERNEL__
cfs_waitq_t ln_eq_waitq;
- cfs_spinlock_t ln_eq_wait_lock;
+ spinlock_t ln_eq_wait_lock;
#else
# ifndef HAVE_LIBPTHREAD
int ln_eq_wait_lock;
cfs_list_t ln_rcd_zombie;
#ifdef __KERNEL__
/* serialise startup/shutdown */
- cfs_semaphore_t ln_rc_signal;
+ struct semaphore ln_rc_signal;
- cfs_mutex_t ln_api_mutex;
- cfs_mutex_t ln_lnd_mutex;
+ struct mutex ln_api_mutex;
+ struct mutex ln_lnd_mutex;
#else
# ifndef HAVE_LIBPTHREAD
int ln_api_mutex;
for (i = 0; i < npages; i++) {
if (p->mxg_pages[i] != NULL) {
__free_page(p->mxg_pages[i]);
- cfs_spin_lock(&kmxlnd_data.kmx_mem_lock);
- kmxlnd_data.kmx_mem_used -= PAGE_SIZE;
- cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock);
+ spin_lock(&kmxlnd_data.kmx_mem_lock);
+ kmxlnd_data.kmx_mem_used -= PAGE_SIZE;
+ spin_unlock(&kmxlnd_data.kmx_mem_lock);
}
}
mxlnd_free_pages(p);
return -ENOMEM;