--- /dev/null
+#!/bin/sed -f
+
+# Script to clean up libcfs macros; it is run against the tree at build time.
+# Migrate libcfs to emulate Linux kernel APIs.
+# http://jira.whamcloud.com/browse/LU-1346
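+#
+# Each conversion below comes in pairs: an s/// rule renames the cfs_ wrapper
+# to the native Linux name, and the /.../d rule that follows deletes the
+# compat #define that becomes self-referential once the rename has run.
+# For example, the compat header line
+#   #define cfs_spin_lock(lock)   spin_lock(lock)
+# is first rewritten to
+#   #define spin_lock(lock)       spin_lock(lock)
+# and that degenerate define is then removed by the matching /.../d rule.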
+
+# remove extra blank line
+# /^$/{N;/^\n$/D}
+
+################################################################################
+# lock - spinlock, rw_semaphore, rwlock, completion, semaphore, mutex
+# - lock_kernel, unlock_kernel, lockdep
+
+# spinlock
+/typedef *spinlock_t *cfs_spinlock_t;/d
+s/\bcfs_spinlock_t\b/spinlock_t/g
+s/\bcfs_spin_lock_init\b/spin_lock_init/g
+/#[ \t]*define[ \t]*\bspin_lock_init\b *( *\w* *)[ \t]*\bspin_lock_init\b *( *\w* *)/d
+s/\bcfs_spin_lock\b/spin_lock/g
+/#[ \t]*define[ \t]*\bspin_lock\b *( *\w* *)[ \t]*\bspin_lock\b *( *\w* *)/d
+s/\bcfs_spin_lock_bh\b/spin_lock_bh/g
+/#[ \t]*define[ \t]*\bspin_lock_bh\b *( *\w* *)[ \t]*\bspin_lock_bh\b *( *\w* *)/d
+s/\bcfs_spin_lock_bh_init\b/spin_lock_bh_init/g
+/#[ \t]*define[ \t]*\bspin_lock_bh_init\b *( *\w* *)[ \t]*\bspin_lock_bh_init\b *( *\w* *)/d
+s/\bcfs_spin_unlock\b/spin_unlock/g
+/#[ \t]*define[ \t]*\bspin_unlock\b *( *\w* *)[ \t]*\bspin_unlock\b *( *\w* *)/d
+s/\bcfs_spin_unlock_bh\b/spin_unlock_bh/g
+/#[ \t]*define[ \t]*\bspin_unlock_bh\b *( *\w* *)[ \t]*\bspin_unlock_bh\b *( *\w* *)/d
+s/\bcfs_spin_trylock\b/spin_trylock/g
+/#[ \t]*define[ \t]*\bspin_trylock\b *( *\w* *)[ \t]*\bspin_trylock\b *( *\w* *)/d
+s/\bcfs_spin_is_locked\b/spin_is_locked/g
+/#[ \t]*define[ \t]*\bspin_is_locked\b *( *\w* *)[ \t]*\bspin_is_locked\b *( *\w* *)/d
+
+s/\bcfs_spin_lock_irq\b/spin_lock_irq/g
+/#[ \t]*define[ \t]*\bspin_lock_irq\b *( *\w* *)[ \t]*\bspin_lock_irq\b *( *\w* *)/d
+s/\bcfs_spin_unlock_irq\b/spin_unlock_irq/g
+/#[ \t]*define[ \t]*\bspin_unlock_irq\b *( *\w* *)[ \t]*\bspin_unlock_irq\b *( *\w* *)/d
+s/\bcfs_read_lock_irqsave\b/read_lock_irqsave/g
+/#[ \t]*define[ \t]*\bread_lock_irqsave\b *( *\w* *, *\w* *)[ \t]*\bread_lock_irqsave\b *( *\w* *, *\w* *)/d
+s/\bcfs_write_lock_irqsave\b/write_lock_irqsave/g
+/#[ \t]*define[ \t]*\bwrite_lock_irqsave\b *( *\w* *, *\w* *)[ \t]*\bwrite_lock_irqsave\b *( *\w* *, *\w* *)/d
+s/\bcfs_write_unlock_irqrestore\b/write_unlock_irqrestore/g
+/#[ \t]*define[ \t]*\bwrite_unlock_irqrestore\b *( *\w* *, *\w* *)[ \t]*\bwrite_unlock_irqrestore\b *( *\w* *, *\w* *)/d
+s/\bcfs_spin_lock_irqsave\b/spin_lock_irqsave/g
+/#[ \t]*define[ \t]*\bspin_lock_irqsave\b *( *\w* *, *\w* *)[ \t]*\bspin_lock_irqsave\b *( *\w* *, *\w* *)/d
+s/\bcfs_spin_unlock_irqrestore\b/spin_unlock_irqrestore/g
+/#[ \t]*define[ \t]*\bspin_unlock_irqrestore\b *( *\w* *, *\w* *)[ \t]*\bspin_unlock_irqrestore\b *( *\w* *, *\w* *)/d
+s/\bCFS_SPIN_LOCK_UNLOCKED\b/SPIN_LOCK_UNLOCKED/g
+/#[ \t]*define[ \t]*\bSPIN_LOCK_UNLOCKED\b[ \t]*\bSPIN_LOCK_UNLOCKED\b/d
+
+# rw_semaphore
+s/\bcfs_rw_semaphore_t\b/struct rw_semaphore/g
+s/\bcfs_init_rwsem\b/init_rwsem/g
+/#[ \t]*define[ \t]*\binit_rwsem\b *( *\w* *)[ \t]*\binit_rwsem\b *( *\w* *)/d
+s/\bcfs_down_read\b/down_read/g
+/#[ \t]*define[ \t]*\bdown_read\b *( *\w* *)[ \t]*\bdown_read\b *( *\w* *)/d
+s/\bcfs_down_read_trylock\b/down_read_trylock/g
+/#[ \t]*define[ \t]*\bdown_read_trylock\b *( *\w* *)[ \t]*\bdown_read_trylock\b *( *\w* *)/d
+s/\bcfs_up_read\b/up_read/g
+/#[ \t]*define[ \t]*\bup_read\b *( *\w* *)[ \t]*\bup_read\b *( *\w* *)/d
+s/\bcfs_down_write\b/down_write/g
+/#[ \t]*define[ \t]*\bdown_write\b *( *\w* *)[ \t]*\bdown_write\b *( *\w* *)/d
+s/\bcfs_down_write_trylock\b/down_write_trylock/g
+/#[ \t]*define[ \t]*\bdown_write_trylock\b *( *\w* *)[ \t]*\bdown_write_trylock\b *( *\w* *)/d
+s/\bcfs_up_write\b/up_write/g
+/#[ \t]*define[ \t]*\bup_write\b *( *\w* *)[ \t]*\bup_write\b *( *\w* *)/d
+s/\bcfs_fini_rwsem\b/fini_rwsem/g
+s/\bCFS_DECLARE_RWSEM\b/DECLARE_RWSEM/g
+/#[ \t]*define[ \t]*\bDECLARE_RWSEM\b *( *\w* *)[ \t]*\bDECLARE_RWSEM\b *( *\w* *)/d
+
+s/\bcfs_semaphore\b/semaphore/g
+s/\bcfs_rw_semaphore\b/rw_semaphore/g
+s/\bcfs_init_completion_module\b/init_completion_module/g
+s/\bcfs_call_wait_handler\b/call_wait_handler/g
+s/\bcfs_wait_handler_t\b/wait_handler_t/g
+s/\bcfs_mt_completion_t\b/mt_completion_t/g
+s/\bcfs_mt_init_completion\b/mt_init_completion/g
+s/\bcfs_mt_wait_for_completion\b/mt_wait_for_completion/g
+s/\bcfs_mt_complete\b/mt_complete/g
+s/\bcfs_mt_fini_completion\b/mt_fini_completion/g
+s/\bcfs_mt_atomic_t\b/mt_atomic_t/g
+s/\bcfs_mt_atomic_read\b/mt_atomic_read/g
+s/\bcfs_mt_atomic_set\b/mt_atomic_set/g
+s/\bcfs_mt_atomic_dec_and_test\b/mt_atomic_dec_and_test/g
+s/\bcfs_mt_atomic_inc\b/mt_atomic_inc/g
+s/\bcfs_mt_atomic_dec\b/mt_atomic_dec/g
+s/\bcfs_mt_atomic_add\b/mt_atomic_add/g
+s/\bcfs_mt_atomic_sub\b/mt_atomic_sub/g
+
+# rwlock
+/typedef *rwlock_t *cfs_rwlock_t;/d
+s/\bcfs_rwlock_t\b/rwlock_t/g
+s/\bcfs_rwlock_init\b/rwlock_init/g
+/#[ \t]*define[ \t]*\brwlock_init\b *( *\w* *)[ \t]*\brwlock_init\b *( *\w* *)/d
+s/\bcfs_read_lock\b/read_lock/g
+/#[ \t]*define[ \t]*\bread_lock\b *( *\w* *)[ \t]*\bread_lock\b *( *\w* *)/d
+s/\bcfs_read_unlock\b/read_unlock/g
+/#[ \t]*define[ \t]*\bread_unlock\b *( *\w* *)[ \t]*\bread_unlock\b *( *\w* *)/d
+s/\bcfs_read_unlock_irqrestore\b/read_unlock_irqrestore/g
+#/#[ \t]*define[ \t]*\bread_unlock_irqrestore\b *( *\w* *)[ \t]*\bread_unlock_irqrestore\b *( *\w* *)/d
+/#define read_unlock_irqrestore(lock,flags) \\/{N;d}
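+# Compat defines that continue onto a second line with a trailing backslash
+# (as read_unlock_irqrestore above does) cannot be caught by a one-line
+# /.../d rule; the {N;d} form appends the continuation line to the pattern
+# space with N and then deletes both lines at once.  The same form is used
+# below for wait_for_completion_interruptible, mutex_lock_nested and
+# find_next_zero_bit.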
+s/\bcfs_write_lock\b/write_lock/g
+/#[ \t]*define[ \t]*\bwrite_lock\b *( *\w* *)[ \t]*\bwrite_lock\b *( *\w* *)/d
+s/\bcfs_write_unlock\b/write_unlock/g
+/#[ \t]*define[ \t]*\bwrite_unlock\b *( *\w* *)[ \t]*\bwrite_unlock\b *( *\w* *)/d
+s/\bcfs_write_lock_bh\b/write_lock_bh/g
+/#[ \t]*define[ \t]*\bwrite_lock_bh\b *( *\w* *)[ \t]*\bwrite_lock_bh\b *( *\w* *)/d
+s/\bcfs_write_unlock_bh\b/write_unlock_bh/g
+/#[ \t]*define[ \t]*\bwrite_unlock_bh\b *( *\w* *)[ \t]*\bwrite_unlock_bh\b *( *\w* *)/d
+s/\bCFS_RW_LOCK_UNLOCKED\b/RW_LOCK_UNLOCKED/g
+/#[ \t]*define[ \t]*\bRW_LOCK_UNLOCKED\b *\bRW_LOCK_UNLOCKED\b */d
+
+# completion
+s/\bcfs_completion_t\b/struct completion/g
+s/\bCFS_DECLARE_COMPLETION\b/DECLARE_COMPLETION/g
+/#[ \t]*define[ \t]*\bDECLARE_COMPLETION\b *( *\w* *)[ \t]*\bDECLARE_COMPLETION\b *( *\w* *)/d
+s/\bCFS_INIT_COMPLETION\b/INIT_COMPLETION/g
+/#[ \t]*define[ \t]*\bINIT_COMPLETION\b *( *\w* *)[ \t]*\bINIT_COMPLETION\b *( *\w* *)/d
+s/\bCFS_COMPLETION_INITIALIZER\b/COMPLETION_INITIALIZER/g
+/#[ \t]*define[ \t]*\bCOMPLETION_INITIALIZER\b *( *\w* *)[ \t]*\bCOMPLETION_INITIALIZER\b *( *\w* *)/d
+s/\bcfs_init_completion\b/init_completion/g
+/#[ \t]*define[ \t]*\binit_completion\b *( *\w* *)[ \t]*\binit_completion\b *( *\w* *)/d
+s/\bcfs_complete\b/complete/g
+/#[ \t]*define[ \t]*\bcomplete\b *( *\w* *)[ \t]*\bcomplete\b *( *\w* *)/d
+s/\bcfs_wait_for_completion\b/wait_for_completion/g
+/#[ \t]*define[ \t]*\bwait_for_completion\b *( *\w* *)[ \t]*\bwait_for_completion\b *( *\w* *)/d
+s/\bcfs_wait_for_completion_interruptible\b/wait_for_completion_interruptible/g
+/#define wait_for_completion_interruptible(c) \\/{N;d}
+s/\bcfs_complete_and_exit\b/complete_and_exit/g
+/#[ \t]*define[ \t]*\bcomplete_and_exit\b *( *\w* *, *\w* *)[ \t]*\bcomplete_and_exit\b *( *\w* *, *\w* *)/d
+s/\bcfs_fini_completion\b/fini_completion/g
+
+# semaphore
+s/\bcfs_semaphore_t\b/struct semaphore/g
+s/\bCFS_DEFINE_SEMAPHORE\b/DEFINE_SEMAPHORE/g
+/#[ \t]*define[ \t]*\bDEFINE_SEMAPHORE\b *( *\w* *)[ \t]*\bDEFINE_SEMAPHORE\b *( *\w* *)/d
+s/\bcfs_sema_init\b/sema_init/g
+/#[ \t]*define[ \t]*\bsema_init\b *( *\w* *, *\w* *)[ \t]*\bsema_init\b *( *\w* *, *\w* *)/d
+s/\bcfs_up\b/up/g
+/#[ \t]*define[ \t]*\bup\b *( *\w* *)[ \t]*\bup\b *( *\w* *)/d
+s/\bcfs_down\b/down/g
+/#[ \t]*define[ \t]*\bdown\b *( *\w* *)[ \t]*\bdown\b *( *\w* *)/d
+s/\bcfs_down_interruptible\b/down_interruptible/g
+/#[ \t]*define[ \t]*\bdown_interruptible\b *( *\w* *)[ \t]*\bdown_interruptible\b *( *\w* *)/d
+s/\bcfs_down_trylock\b/down_trylock/g
+/#[ \t]*define[ \t]*\bdown_trylock\b *( *\w* *)[ \t]*\bdown_trylock\b *( *\w* *)/d
+
+# mutex
+s/\bcfs_mutex_t\b/struct mutex/g
+s/\bCFS_DEFINE_MUTEX\b/DEFINE_MUTEX/g
+/#[ \t]*define[ \t]*\bDEFINE_MUTEX\b *( *name *)[ \t]*\bDEFINE_MUTEX\b *( *name *)/d
+s/\bcfs_mutex_init\b/mutex_init/g
+/#[ \t]*define[ \t]*\bmutex_init\b *( *\w* *)[ \t]*\bmutex_init\b *( *\w* *)/d
+s/\bcfs_mutex_lock\b/mutex_lock/g
+/#[ \t]*define[ \t]*\bmutex_lock\b *( *\w* *)[ \t]*\bmutex_lock\b *( *\w* *)/d
+s/\bcfs_mutex_unlock\b/mutex_unlock/g
+/#[ \t]*define[ \t]*\bmutex_unlock\b *( *\w* *)[ \t]*\bmutex_unlock\b *( *\w* *)/d
+s/\bcfs_mutex_lock_interruptible\b/mutex_lock_interruptible/g
+/#[ \t]*define[ \t]*\bmutex_lock_interruptible\b *( *\w* *)[ \t]*\bmutex_lock_interruptible\b *( *\w* *)/d
+s/\bcfs_mutex_trylock\b/mutex_trylock/g
+/#[ \t]*define[ \t]*\bmutex_trylock\b *( *\w* *)[ \t]*\bmutex_trylock\b *( *\w* *)/d
+s/\bcfs_mutex_is_locked\b/mutex_is_locked/g
+/#[ \t]*define[ \t]*\bmutex_is_locked\b *( *\w* *)[ \t]*\bmutex_is_locked\b *( *\w* *)/d
+s/\bcfs_mutex_destroy\b/mutex_destroy/g
+/#[ \t]*define[ \t]*\bmutex_destroy\b *( *\w* *)[ \t]*\bmutex_destroy\b *( *\w* *)/d
+
+# lock_kernel, unlock_kernel
+# s/\bcfs_lock_kernel\b/lock_kernel/g
+# /#[ \t]*define[ \t]*\block_kernel\b *( *)[ \t]*\block_kernel\b *( *)/d
+# s/\bcfs_unlock_kernel\b/unlock_kernel/g
+# /#[ \t]*define[ \t]*\bunlock_kernel\b *( *)[ \t]*\bunlock_kernel\b *( *)/d
+
+# lockdep
+s/\bcfs_lock_class_key\b/lock_class_key/g
+s/\bcfs_lock_class_key_t\b/struct lock_class_key/g
+s/\bcfs_lockdep_set_class\b/lockdep_set_class/g
+s/\bcfs_lockdep_off\b/lockdep_off/g
+s/\bcfs_lockdep_on\b/lockdep_on/g
+/#[ \t]*define[ \t]*\blockdep_off\b *( *)[ \t]*\blockdep_off\b *( *)/d
+/#[ \t]*define[ \t]*\blockdep_on\b *( *)[ \t]*\blockdep_on\b *( *)/d
+/#[ \t]*define[ \t]*\blockdep_set_class\b *( *\w* *, *\w* *)[ \t]*\blockdep_set_class\b *( *\w* *, *\w* *)/d
+
+s/\bcfs_mutex_lock_nested\b/mutex_lock_nested/g
+#/#[ \t]*define[ \t]*\bmutex_lock_nested\b *( *\w* *, *\w* *)[ \t]*\bmutex_lock_nested\b *( *\w* *, *\w* *)/d
+/#define mutex_lock_nested(mutex, subclass) \\/{N;d}
+s/\bcfs_spin_lock_nested\b/spin_lock_nested/g
+/#[ \t]*define[ \t]*\bspin_lock_nested\b *( *\w* *, *\w* *)[ \t]*\bspin_lock_nested\b *( *\w* *, *\w* *)/d
+s/\bcfs_down_read_nested\b/down_read_nested/g
+/#[ \t]*define[ \t]*\bdown_read_nested\b *( *\w* *, *\w* *)[ \t]*\bdown_read_nested\b *( *\w* *, *\w* *)/d
+s/\bcfs_down_write_nested\b/down_write_nested/g
+/#[ \t]*define[ \t]*\bdown_write_nested\b *( *\w* *, *\w* *)[ \t]*\bdown_write_nested\b *( *\w* *, *\w* *)/d
+
+###############################################################################
+# bitops
+
+s/\bcfs_test_bit\b/test_bit/g
+/#[ \t]*define[ \t]*\btest_bit\b *( *\w* *, *\w* *)[ \t]*\btest_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_set_bit\b/set_bit/g
+/#[ \t]*define[ \t]*\bset_bit\b *( *\w* *, *\w* *)[ \t]*\bset_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_clear_bit\b/clear_bit/g
+/#[ \t]*define[ \t]*\bclear_bit\b *( *\w* *, *\w* *)[ \t]*\bclear_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_test_and_set_bit\b/test_and_set_bit/g
+/#[ \t]*define[ \t]*\btest_and_set_bit\b *( *\w* *, *\w* *)[ \t]*\btest_and_set_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_test_and_clear_bit\b/test_and_clear_bit/g
+/#[ \t]*define[ \t]*\btest_and_clear_bit\b *( *\w* *, *\w* *)[ \t]*\btest_and_clear_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_find_first_bit\b/find_first_bit/g
+/#[ \t]*define[ \t]*\bfind_first_bit\b *( *\w* *, *\w* *)[ \t]*\bfind_first_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_find_first_zero_bit\b/find_first_zero_bit/g
+/#[ \t]*define[ \t]*\bfind_first_zero_bit\b *( *\w* *, *\w* *)[ \t]*\bfind_first_zero_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_find_next_bit\b/find_next_bit/g
+/#[ \t]*define[ \t]*\bfind_next_bit\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bfind_next_bit\b *( *\w* *, *\w* *, *\w* *)/d
+s/\bcfs_find_next_zero_bit\b/find_next_zero_bit/g
+/#define find_next_zero_bit(addr, size, off) \\/{N;d}
+s/\bcfs_ffz\b/ffz/g
+/#[ \t]*define[ \t]*\bffz\b *( *\w* *)[ \t]*\bffz\b *( *\w* *)/d
+s/\bcfs_ffs\b/ffs/g
+/#[ \t]*define[ \t]*\bffs\b *( *\w* *)[ \t]*\bffs\b *( *\w* *)/d
+s/\bcfs_fls\b/fls/g
+/#[ \t]*define[ \t]*\bfls\b *( *\w* *)[ \t]*\bfls\b *( *\w* *)/d
+
+################################################################################
+# file operations
+
+#s/\bcfs_file_t\b/file_t/g
+#s/\bcfs_dentry_t\b/dentry_t/g
+#s/\bcfs_dirent_t\b/dirent_t/g
+#s/\bcfs_kstatfs_t\b/kstatfs_t/g
+#s/\bcfs_filp_size\b/filp_size/g
+#s/\bcfs_filp_poff\b/filp_poff/g
+#s/\bcfs_filp_open\b/filp_open/g
+#/#[ \t]*define[ \t]*\bfilp_open\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bfilp_open\b *( *\w* *, *\w* *, *\w* *)/d
+#s/\bcfs_do_fsync\b/do_fsync/g
+#s/\bcfs_filp_close\b/filp_close/g
+#/#[ \t]*define[ \t]*\bfilp_close\b *( *\w* *, *\w* *)[ \t]*\bfilp_close\b *( *\w* *, *\w* *)/d
+#s/\bcfs_filp_read\b/filp_read/g
+#s/\bcfs_filp_write\b/filp_write/g
+#s/\bcfs_filp_fsync\b/filp_fsync/g
+#s/\bcfs_get_file\b/get_file/g
+#/#[ \t]*define[ \t]*\bget_file\b *( *\w* *)[ \t]*\bget_file\b *( *\w* *)/d
+#s/\bcfs_get_fd\b/fget/g
+#/#[ \t]*define[ \t]*\bfget\b *( *\w* *)[ \t]*\bfget\b *( *\w* *)/d
+#s/\bcfs_put_file\b/fput/g
+#/#[ \t]*define[ \t]*\bfput\b *( *\w* *)[ \t]*\bfput\b *( *\w* *)/d
+#s/\bcfs_file_count\b/file_count/g
+#/#[ \t]*define[ \t]*\bfile_count\b *( *\w* *)[ \t]*\bfile_count\b *( *\w* *)/d
+#s/\bCFS_INT_LIMIT\b/INT_LIMIT/g
+#s/\bCFS_OFFSET_MAX\b/OFFSET_MAX/g
+#s/\bcfs_flock_t\b/flock_t/g
+#s/\bcfs_flock_type\b/flock_type/g
+#s/\bcfs_flock_set_type\b/flock_set_type/g
+#s/\bcfs_flock_pid\b/flock_pid/g
+#s/\bcfs_flock_set_pid\b/flock_set_pid/g
+#s/\bcfs_flock_start\b/flock_start/g
+#s/\bcfs_flock_set_start\b/flock_set_start/g
+#s/\bcfs_flock_end\b/flock_end/g
+#s/\bcfs_flock_set_end\b/flock_set_end/g
+#s/\bcfs_user_write\b/user_write/g
+#s/\bCFS_IFSHIFT\b/IFSHIFT/g
+#s/\bCFS_IFTODT\b/IFTODT/g
+#s/\bCFS_DTTOIF\b/DTTOIF/g
+
+################################################################################
+# memory operations
+
+#s/\bcfs_page_t\b/page_t/g
+#s/\bCFS_PAGE_SIZE\b/PAGE_CACHE_SIZE/g
+#/#[ \t]*define[ \t]*\bPAGE_CACHE_SIZE\b[ \t]*\bPAGE_CACHE_SIZE\b/d
+#s/\bCFS_PAGE_SHIFT\b/PAGE_CACHE_SHIFT/g
+#/#[ \t]*define[ \t]*\bPAGE_CACHE_SHIFT\b[ \t]*\bPAGE_CACHE_SHIFT\b/d
+#s/\bCFS_PAGE_MASK\b/PAGE_CACHE_MASK/g
+#/#[ \t]*define[ \t]*\bPAGE_CACHE_MASK\b[ \t]*\bPAGE_CACHE_MASK\b/d
+#s/\bcfs_num_physpages\b/num_physpages/g
+#/#[ \t]*define[ \t]*\bnum_physpages\b[ \t]*\bnum_physpages\b/d
+#s/\bcfs_copy_from_user\b/copy_from_user/g
+#/#[ \t]*define[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)/d
+#s/\bcfs_copy_to_user\b/copy_to_user/g
+#/#[ \t]*define[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)/d
+#s/\bcfs_page_address\b/page_address/g
+#/#[ \t]*define[ \t]*\bpage_address\b *( *\w* *)[ \t]*\bpage_address\b *( *\w* *)/d
+#s/\bcfs_kmap\b/kmap/g
+#/#[ \t]*define[ \t]*\bkmap\b *( *\w* *)[ \t]*\bkmap\b *( *\w* *)/d
+#s/\bcfs_kunmap\b/kunmap/g
+#/#[ \t]*define[ \t]*\bkunmap\b *( *\w* *)[ \t]*\bkunmap\b *( *\w* *)/d
+#s/\bcfs_get_page\b/get_page/g
+#/#[ \t]*define[ \t]*\bget_page\b *( *\w* *)[ \t]*\bget_page\b *( *\w* *)/d
+#s/\bcfs_page_count\b/page_count/g
+#/#[ \t]*define[ \t]*\bpage_count\b *( *\w* *)[ \t]*\bpage_count\b *( *\w* *)/d
+#s/\bcfs_page_index\b/page_index/g
+#/#[ \t]*define[ \t]*\bpage_index\b *( *\w* *)[ \t]*\bpage_index\b *( *\w* *)/d
+#s/\bcfs_page_pin\b/page_cache_get/g
+#/#[ \t]*define[ \t]*\bpage_cache_get\b *( *\w* *)[ \t]*\bpage_cache_get\b *( *\w* *)/d
+#s/\bcfs_page_unpin\b/page_cache_release/g
+#/#[ \t]*define[ \t]*\bpage_cache_release\b *( *\w* *)[ \t]*\bpage_cache_release\b *( *\w* *)/d
+#s/\bcfs_memory_pressure_get\b/memory_pressure_get/g
+#s/\bcfs_memory_pressure_set\b/memory_pressure_set/g
+#s/\bcfs_memory_pressure_clr\b/memory_pressure_clr/g
+#s/\bCFS_NUM_CACHEPAGES\b/NUM_CACHEPAGES/g
+# memory allocator
+#s/\bCFS_ALLOC_ATOMIC\b/GFP_ATOMIC/g
+#/#[ \t]*define[ \t]*\bGFP_ATOMIC\b[ \t]*\bGFP_ATOMIC\b/d
+#s/\bCFS_ALLOC_WAIT\b/__GFP_WAIT/g
+#/#[ \t]*define[ \t]*\b__GFP_WAIT\b[ \t]*\b__GFP_WAIT\b/d
+#s/\bCFS_ALLOC_ZERO\b/__GFP_ZERO/g
+#/#[ \t]*define[ \t]*\b__GFP_ZERO\b[ \t]*\b__GFP_ZERO\b/d
+#s/\bCFS_ALLOC_FS\b/__GFP_FS/g
+#/#[ \t]*define[ \t]*\b__GFP_FS\b[ \t]*\b__GFP_FS\b/d
+#s/\bCFS_ALLOC_IO\b/__GFP_IO/g
+#/#[ \t]*define[ \t]*\b__GFP_IO\b[ \t]*\b__GFP_IO\b/d
+#s/\bCFS_ALLOC_NOWARN\b/__GFP_NOWARN/g
+#/#[ \t]*define[ \t]*\b__GFP_NOWARN\b[ \t]*\b__GFP_NOWARN\b/d
+#s/\bCFS_ALLOC_STD\b/GFP_IOFS/g
+#/#[ \t]*define[ \t]*\bGFP_IOFS\b[ \t]*\bGFP_IOFS\b/d
+#s/\bCFS_ALLOC_USER\b/GFP_KERNEL/g
+#/#[ \t]*define[ \t]*\bGFP_KERNEL\b[ \t]*\bGFP_KERNEL\b/d
+#s/\bCFS_ALLOC_HIGHMEM\b/__GFP_HIGHMEM/g
+#/#[ \t]*define[ \t]*\b__GFP_HIGHMEM\b[ \t]*\b__GFP_HIGHMEM\b/d
+#s/\bCFS_ALLOC_HIGHUSER\b/GFP_HIGHUSER/g
+#/#[ \t]*define[ \t]*\bGFP_HIGHUSER\b[ \t]*\bGFP_HIGHUSER\b/d
+#s/\bCFS_ALLOC_ATOMIC_TRY\b/ALLOC_ATOMIC_TRY/g
+#s/\bcfs_alloc\b/kmalloc/g
+#/#[ \t]*define[ \t]*\bkmalloc\b *( *\w* *, *\w* *)[ \t]*\bkmalloc\b *( *\w* *, *\w* *)/d
+#s/\bcfs_free\b/kfree/g
+#/#[ \t]*define[ \t]*\bkfree\b *( *\w* *)[ \t]*\bkfree\b *( *\w* *)/d
+#s/\bcfs_alloc_large\b/vmalloc/g
+#/#[ \t]*define[ \t]*\bvmalloc\b *( *\w* *)[ \t]*\bvmalloc\b *( *\w* *)/d
+#s/\bcfs_free_large\b/vfree/g
+#/#[ \t]*define[ \t]*\bvfree\b *( *\w* *)[ \t]*\bvfree\b *( *\w* *)/d
+#s/\bcfs_alloc_page\b/alloc_page/g
+#/#[ \t]*define[ \t]*\balloc_page\b *( *\w* *)[ \t]*\balloc_page\b *( *\w* *)/d
+#s/\bcfs_free_page\b/__free_page/g
+#/#[ \t]*define[ \t]*\b__free_page\b *( *\w* *)[ \t]*\b__free_page\b *( *\w* *)/d
+# TODO: SLAB allocator
+#s/\bCFS_DECL_MMSPACE\b/DECL_MMSPACE/g
+#s/\bCFS_MMSPACE_OPEN\b/MMSPACE_OPEN/g
+#s/\bCFS_MMSPACE_CLOSE\b/MMSPACE_CLOSE/g
+#s/\bCFS_SLAB_HWCACHE_ALIGN\b/SLAB_HWCACHE_ALIGN/g
+#/#[ \t]*define[ \t]*\bSLAB_HWCACHE_ALIGN\b[ \t]*\bSLAB_HWCACHE_ALIGN\b/d
+#s/\bCFS_SLAB_KERNEL\b/SLAB_KERNEL/g
+#/#[ \t]*define[ \t]*\bSLAB_KERNEL\b[ \t]*\bSLAB_KERNEL\b/d
+#s/\bCFS_SLAB_NOFS\b/SLAB_NOFS/g
+#/#[ \t]*define[ \t]*\bSLAB_NOFS\b[ \t]*\bSLAB_NOFS\b/d
+#s/\bcfs_shrinker\b/shrinker/g
+#/#[ \t]*define[ \t]*\bshrinker\b[ \t]*\bshrinker\b/d
+#s/\bcfs_shrinker_t\b/shrinker_t/g
+#/typedef[ \t]*\bshrinker_t\b[ \t]*\bshrinker_t\b/d
+#s/\bcfs_set_shrinker\b/set_shrinker/g
+#/#[ \t]*define[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)/d
+#s/\bcfs_remove_shrinker\b/remove_shrinker/g
+#/#[ \t]*define[ \t]*\bremove_shrinker\b *( *\w* *)[ \t]*\bremove_shrinker\b *( *\w* *)/d
+#s/\bCFS_DEFAULT_SEEKS\b/DEFAULT_SEEKS/g
+#/#[ \t]*define[ \t]*\bDEFAULT_SEEKS\b[ \t]*\bDEFAULT_SEEKS\b/d
+
+
+#s/\bcfs_\b//g
+#s/\bCFS_\b//g
+#/typedef[ \t]*\b\b[ \t]*\b\b/d
+#/#[ \t]*define[ \t]*\b\b[ \t]*\b\b/d
+#/#[ \t]*define[ \t]*\b\b *( *)[ \t]*\b\b *( *)/d
+#/#[ \t]*define[ \t]*\b\b *( *\w* *)[ \t]*\b\b *( *\w* *)/d
+#/#[ \t]*define[ \t]*\b\b *( *\w* *, *\w* *)[ \t]*\b\b *( *\w* *, *\w* *)/d
+#/#[ \t]*define[ \t]*\b\b *( *\w* *, *\w* *, *\w* *)[ \t]*\b\b *( *\w* *, *\w* *, *\w* *)/d
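+
+# The commented-out patterns above are generic templates for the rule shapes
+# used throughout this script, presumably kept as a starting point for future
+# conversions: pick the template whose argument count matches and fill in the
+# macro name (e.g. the one-argument form with "up_read" filled in gives the
+# up_read rule in the rw_semaphore section).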
static inline
void cfs_bitmap_set(cfs_bitmap_t *bitmap, int nbit)
{
- cfs_set_bit(nbit, bitmap->data);
+ set_bit(nbit, bitmap->data);
}
static inline
void cfs_bitmap_clear(cfs_bitmap_t *bitmap, int nbit)
{
- cfs_test_and_clear_bit(nbit, bitmap->data);
+ test_and_clear_bit(nbit, bitmap->data);
}
static inline
int cfs_bitmap_check(cfs_bitmap_t *bitmap, int nbit)
{
- return cfs_test_bit(nbit, bitmap->data);
+ return test_bit(nbit, bitmap->data);
}
static inline
int cfs_bitmap_test_and_clear(cfs_bitmap_t *bitmap, int nbit)
{
- return cfs_test_and_clear_bit(nbit, bitmap->data);
+ return test_and_clear_bit(nbit, bitmap->data);
}
/* return 0 is bitmap has none set bits */
static inline
int cfs_bitmap_check_empty(cfs_bitmap_t *bitmap)
{
- return cfs_find_first_bit(bitmap->data, bitmap->size) == bitmap->size;
+ return find_first_bit(bitmap->data, bitmap->size) == bitmap->size;
}
static inline
new->size = newsize;
}
-#define cfs_foreach_bit(bitmap, pos) \
- for((pos)=cfs_find_first_bit((bitmap)->data, bitmap->size); \
- (pos) < (bitmap)->size; \
- (pos) = cfs_find_next_bit((bitmap)->data, (bitmap)->size, (pos)+1))
+#define cfs_foreach_bit(bitmap, pos) \
+ for ((pos) = find_first_bit((bitmap)->data, bitmap->size); \
+ (pos) < (bitmap)->size; \
+ (pos) = find_next_bit((bitmap)->data, (bitmap)->size, (pos) + 1))
#endif
/*
* spin_lock (use Linux kernel's primitives)
- *
+ *
* - spin_lock_init(x)
* - spin_lock(x)
* - spin_unlock(x)
* - spin_trylock(x)
- *
+ *
* - spin_lock_irqsave(x, f)
* - spin_unlock_irqrestore(x, f)
*/
struct cfs_hash_hlist_ops;
typedef union {
- cfs_rwlock_t rw; /**< rwlock */
- cfs_spinlock_t spin; /**< spinlock */
+ rwlock_t rw; /**< rwlock */
+ spinlock_t spin; /**< spinlock */
} cfs_hash_lock_t;
/**
cfs_hash_bucket_t **hs_rehash_buckets;
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
/** serialize debug members */
- cfs_spinlock_t hs_dep_lock;
+ spinlock_t hs_dep_lock;
/** max depth */
unsigned int hs_dep_max;
/** id of the deepest bucket */
/* exclusively locked */
unsigned int pcl_locked;
/* private lock table */
- cfs_spinlock_t **pcl_locks;
+ spinlock_t **pcl_locks;
};
/* return number of private locks */
*/
#include <linux/bitops.h>
-#define cfs_test_bit(nr, addr) test_bit(nr, addr)
-#define cfs_set_bit(nr, addr) set_bit(nr, addr)
-#define cfs_clear_bit(nr, addr) clear_bit(nr, addr)
-#define cfs_test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
-#define cfs_test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
-#define cfs_find_first_bit(addr, size) find_first_bit(addr, size)
-#define cfs_find_first_zero_bit(addr, size) find_first_zero_bit(addr, size)
-#define cfs_find_next_bit(addr, size, off) find_next_bit(addr, size, off)
-#define cfs_find_next_zero_bit(addr, size, off) \
- find_next_zero_bit(addr, size, off)
-#define cfs_ffz(x) ffz(x)
-#define cfs_ffs(x) ffs(x)
-#define cfs_fls(x) fls(x)
* spinlock "implementation"
*/
-typedef spinlock_t cfs_spinlock_t;
-
-#define cfs_spin_lock_init(lock) spin_lock_init(lock)
-#define cfs_spin_lock(lock) spin_lock(lock)
-#define cfs_spin_lock_bh(lock) spin_lock_bh(lock)
-#define cfs_spin_lock_bh_init(lock) spin_lock_bh_init(lock)
-#define cfs_spin_unlock(lock) spin_unlock(lock)
-#define cfs_spin_unlock_bh(lock) spin_unlock_bh(lock)
-#define cfs_spin_trylock(lock) spin_trylock(lock)
-#define cfs_spin_is_locked(lock) spin_is_locked(lock)
-
-#define cfs_spin_lock_irq(lock) spin_lock_irq(lock)
-#define cfs_spin_unlock_irq(lock) spin_unlock_irq(lock)
-#define cfs_read_lock_irqsave(lock, f) read_lock_irqsave(lock, f)
-#define cfs_write_lock_irqsave(lock, f) write_lock_irqsave(lock, f)
-#define cfs_write_unlock_irqrestore(lock, f) write_unlock_irqrestore(lock, f)
-#define cfs_spin_lock_irqsave(lock, f) spin_lock_irqsave(lock, f)
-#define cfs_spin_unlock_irqrestore(lock, f) spin_unlock_irqrestore(lock, f)
+
+
/*
* rw_semaphore "implementation" (use Linux kernel's primitives)
* - down_write(x)
* - up_write(x)
*/
-typedef struct rw_semaphore cfs_rw_semaphore_t;
-#define cfs_init_rwsem(s) init_rwsem(s)
-#define cfs_down_read(s) down_read(s)
-#define cfs_down_read_trylock(s) down_read_trylock(s)
-#define cfs_up_read(s) up_read(s)
-#define cfs_down_write(s) down_write(s)
-#define cfs_down_write_trylock(s) down_write_trylock(s)
-#define cfs_up_write(s) up_write(s)
-#define cfs_fini_rwsem(s) do {} while(0)
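+/* Kept as a no-op: Linux rw_semaphores need no explicit teardown, so callers
+ * of fini_rwsem() have nothing to release here. */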
+#define fini_rwsem(s) do {} while (0)
-#define CFS_DECLARE_RWSEM(name) DECLARE_RWSEM(name)
/*
* rwlock_t "implementation" (use Linux kernel's primitives)
*
* - RW_LOCK_UNLOCKED
*/
-typedef rwlock_t cfs_rwlock_t;
-
-#define cfs_rwlock_init(lock) rwlock_init(lock)
-#define cfs_read_lock(lock) read_lock(lock)
-#define cfs_read_unlock(lock) read_unlock(lock)
-#define cfs_read_unlock_irqrestore(lock,flags) \
- read_unlock_irqrestore(lock, flags)
-#define cfs_write_lock(lock) write_lock(lock)
-#define cfs_write_unlock(lock) write_unlock(lock)
-#define cfs_write_lock_bh(lock) write_lock_bh(lock)
-#define cfs_write_unlock_bh(lock) write_unlock_bh(lock)
+
#ifndef DEFINE_RWLOCK
#define DEFINE_RWLOCK(lock) rwlock_t lock = __RW_LOCK_UNLOCKED(lock)
* - wait_for_completion_interruptible(c)
* - fini_completion(c)
*/
-typedef struct completion cfs_completion_t;
-
-#define CFS_DECLARE_COMPLETION(work) DECLARE_COMPLETION(work)
-#define CFS_INIT_COMPLETION(c) INIT_COMPLETION(c)
-#define CFS_COMPLETION_INITIALIZER(work) COMPLETION_INITIALIZER(work)
-#define cfs_init_completion(c) init_completion(c)
-#define cfs_complete(c) complete(c)
-#define cfs_wait_for_completion(c) wait_for_completion(c)
-#define cfs_wait_for_completion_interruptible(c) \
- wait_for_completion_interruptible(c)
-#define cfs_complete_and_exit(c, code) complete_and_exit(c, code)
-#define cfs_fini_completion(c) do { } while (0)
+#define fini_completion(c) do { } while (0)
/*
* semaphore "implementation" (use Linux kernel's primitives)
* - down_interruptible(sem)
* - down_trylock(sem)
*/
-typedef struct semaphore cfs_semaphore_t;
-
-#ifdef DEFINE_SEMAPHORE
-#define CFS_DEFINE_SEMAPHORE(name) DEFINE_SEMAPHORE(name)
-#else
-#define CFS_DEFINE_SEMAPHORE(name) DECLARE_MUTEX(name)
-#endif
-
-#define cfs_sema_init(sem, val) sema_init(sem, val)
-#define cfs_up(x) up(x)
-#define cfs_down(x) down(x)
-#define cfs_down_interruptible(x) down_interruptible(x)
-#define cfs_down_trylock(x) down_trylock(x)
/*
* mutex "implementation" (use Linux kernel's primitives)
* - mutex_is_locked(x)
* - mutex_destroy(x)
*/
-typedef struct mutex cfs_mutex_t;
-
-#define CFS_DEFINE_MUTEX(name) DEFINE_MUTEX(name)
-
-#define cfs_mutex_init(x) mutex_init(x)
-#define cfs_mutex_lock(x) mutex_lock(x)
-#define cfs_mutex_unlock(x) mutex_unlock(x)
-#define cfs_mutex_lock_interruptible(x) mutex_lock_interruptible(x)
-#define cfs_mutex_trylock(x) mutex_trylock(x)
-#define cfs_mutex_is_locked(x) mutex_is_locked(x)
-#define cfs_mutex_destroy(x) mutex_destroy(x)
#ifndef lockdep_set_class
*
**************************************************************************/
-typedef struct cfs_lock_class_key {
- ;
-} cfs_lock_class_key_t;
+struct lock_class_key {
+ ;
+};
-#define cfs_lockdep_set_class(lock, key) \
- do { (void)sizeof (lock);(void)sizeof (key); } while (0)
-/* This has to be a macro, so that `subclass' can be undefined in kernels that
- * do not support lockdep. */
+#define lockdep_set_class(lock, key) \
+ do { (void)sizeof(lock); (void)sizeof(key); } while (0)
+/* This has to be a macro, so that `subclass' can be undefined in kernels
+ * that do not support lockdep. */
-static inline void cfs_lockdep_off(void)
+static inline void lockdep_off(void)
{
}
-static inline void cfs_lockdep_on(void)
+static inline void lockdep_on(void)
{
}
#else
-typedef struct lock_class_key cfs_lock_class_key_t;
-#define cfs_lockdep_set_class(lock, key) lockdep_set_class(lock, key)
-#define cfs_lockdep_off() lockdep_off()
-#define cfs_lockdep_on() lockdep_on()
#endif /* lockdep_set_class */
#ifndef CONFIG_DEBUG_LOCK_ALLOC
#ifndef mutex_lock_nested
-#define cfs_mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
-#else
-#define cfs_mutex_lock_nested(mutex, subclass) \
- mutex_lock_nested(mutex, subclass)
+#define mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
#endif
#ifndef spin_lock_nested
-#define cfs_spin_lock_nested(lock, subclass) spin_lock(lock)
-#else
-#define cfs_spin_lock_nested(lock, subclass) spin_lock_nested(lock, subclass)
+#define spin_lock_nested(lock, subclass) spin_lock(lock)
#endif
#ifndef down_read_nested
-#define cfs_down_read_nested(lock, subclass) down_read(lock)
-#else
-#define cfs_down_read_nested(lock, subclass) down_read_nested(lock, subclass)
+#define down_read_nested(lock, subclass) down_read(lock)
#endif
#ifndef down_write_nested
-#define cfs_down_write_nested(lock, subclass) down_write(lock)
-#else
-#define cfs_down_write_nested(lock, subclass) down_write_nested(lock, subclass)
+#define down_write_nested(lock, subclass) down_write(lock)
#endif
-#else /* CONFIG_DEBUG_LOCK_ALLOC is defined */
-#define cfs_mutex_lock_nested(mutex, subclass) \
- mutex_lock_nested(mutex, subclass)
-#define cfs_spin_lock_nested(lock, subclass) spin_lock_nested(lock, subclass)
-#define cfs_down_read_nested(lock, subclass) down_read_nested(lock, subclass)
-#define cfs_down_write_nested(lock, subclass) down_write_nested(lock, subclass)
#endif /* CONFIG_DEBUG_LOCK_ALLOC */
// XXX BUG 1511 -- remove this stanza and all callers when bug 1511 is resolved
#define SIGNAL_MASK_LOCK(task, flags) \
- spin_lock_irqsave(&task->sighand->siglock, flags)
+ spin_lock_irqsave(&task->sighand->siglock, flags)
#define SIGNAL_MASK_UNLOCK(task, flags) \
- spin_unlock_irqrestore(&task->sighand->siglock, flags)
+ spin_unlock_irqrestore(&task->sighand->siglock, flags)
#define USERMODEHELPER(path, argv, envp) \
- call_usermodehelper(path, argv, envp, 1)
+ call_usermodehelper(path, argv, envp, 1)
#define RECALC_SIGPENDING recalc_sigpending()
#define CLEAR_SIGPENDING clear_tsk_thread_flag(current, \
TIF_SIGPENDING)
};
struct upcall_cache {
- cfs_list_t uc_hashtable[UC_CACHE_HASH_SIZE];
- cfs_spinlock_t uc_lock;
- cfs_rwlock_t uc_upcall_rwlock;
-
- char uc_name[40]; /* for upcall */
- char uc_upcall[UC_CACHE_UPCALL_MAXPATH];
- int uc_acquire_expire; /* seconds */
- int uc_entry_expire; /* seconds */
- struct upcall_cache_ops *uc_ops;
+ cfs_list_t uc_hashtable[UC_CACHE_HASH_SIZE];
+ spinlock_t uc_lock;
+ rwlock_t uc_upcall_rwlock;
+
+ char uc_name[40]; /* for upcall */
+ char uc_upcall[UC_CACHE_UPCALL_MAXPATH];
+ int uc_acquire_expire; /* seconds */
+ int uc_entry_expire; /* seconds */
+ struct upcall_cache_ops *uc_ops;
};
struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
#define cfs_seq_open(file, ops, rc) (rc = seq_open(file, ops))
/* in lprocfs_stat.c, to protect the private data for proc entries */
-extern cfs_rw_semaphore_t _lprocfs_lock;
+extern struct rw_semaphore _lprocfs_lock;
/* to begin from 2.6.23, Linux defines self file_operations (proc_reg_file_ops)
* in procfs, the proc file_operation defined by Lustre (lprocfs_generic_fops)
*/
#ifndef HAVE_PROCFS_USERS
-#define LPROCFS_ENTRY() \
-do { \
- cfs_down_read(&_lprocfs_lock); \
+#define LPROCFS_ENTRY() \
+do { \
+ down_read(&_lprocfs_lock); \
} while(0)
-#define LPROCFS_EXIT() \
-do { \
- cfs_up_read(&_lprocfs_lock); \
+#define LPROCFS_EXIT() \
+do { \
+ up_read(&_lprocfs_lock); \
} while(0)
#else
static inline
int LPROCFS_ENTRY_AND_CHECK(struct proc_dir_entry *dp)
{
- int deleted = 0;
- spin_lock(&(dp)->pde_unload_lock);
- if (dp->proc_fops == NULL)
- deleted = 1;
- spin_unlock(&(dp)->pde_unload_lock);
- if (deleted)
- return -ENODEV;
- return 0;
+ int deleted = 0;
+
+ spin_lock(&(dp)->pde_unload_lock);
+ if (dp->proc_fops == NULL)
+ deleted = 1;
+ spin_unlock(&(dp)->pde_unload_lock);
+ if (deleted)
+ return -ENODEV;
+ return 0;
}
#else /* !HAVE_PROCFS_DELETED*/
static inline
up_read(&_lprocfs_lock); \
} while(0)
-#define LPROCFS_WRITE_ENTRY() \
-do { \
- cfs_down_write(&_lprocfs_lock); \
+#define LPROCFS_WRITE_ENTRY() \
+do { \
+ down_write(&_lprocfs_lock); \
} while(0)
-#define LPROCFS_WRITE_EXIT() \
-do { \
- cfs_up_write(&_lprocfs_lock); \
+#define LPROCFS_WRITE_EXIT() \
+do { \
+ up_write(&_lprocfs_lock); \
} while(0)
#else /* !LPROCFS */
size_t count;
loff_t index;
loff_t version;
- cfs_mutex_t lock;
+ struct mutex lock;
struct cfs_seq_operations *op;
void *private;
} cfs_seq_file_t;
#define __LIBCFS_USER_BITOPS_H__
/* test if bit nr is set in bitmap addr; returns previous value of bit nr */
-static __inline__ int cfs_test_and_set_bit(int nr, unsigned long *addr)
+static inline int test_and_set_bit(int nr, unsigned long *addr)
{
unsigned long mask;
return nr;
}
-#define cfs_set_bit(n, a) cfs_test_and_set_bit(n, a)
+#define set_bit(n, a) test_and_set_bit(n, a)
/* clear bit nr in bitmap addr; returns previous value of bit nr*/
-static __inline__ int cfs_test_and_clear_bit(int nr, unsigned long *addr)
+static inline int test_and_clear_bit(int nr, unsigned long *addr)
{
unsigned long mask;
return nr;
}
-#define cfs_clear_bit(n, a) cfs_test_and_clear_bit(n, a)
+#define clear_bit(n, a) test_and_clear_bit(n, a)
-static __inline__ int cfs_test_bit(int nr, const unsigned long *addr)
+static inline int test_bit(int nr, const unsigned long *addr)
{
return ((1UL << (nr & (BITS_PER_LONG - 1))) &
((addr)[nr / BITS_PER_LONG])) != 0;
#define __cfs_ffz(x) __cfs_ffs(~(x))
#define __cfs_flz(x) __cfs_fls(~(x))
-unsigned long cfs_find_next_bit(unsigned long *addr,
- unsigned long size, unsigned long offset);
+unsigned long find_next_bit(unsigned long *addr,
+ unsigned long size, unsigned long offset);
-unsigned long cfs_find_next_zero_bit(unsigned long *addr,
- unsigned long size, unsigned long offset);
+unsigned long find_next_zero_bit(unsigned long *addr,
+ unsigned long size, unsigned long offset);
-#define cfs_find_first_bit(addr,size) (cfs_find_next_bit((addr),(size),0))
-#define cfs_find_first_zero_bit(addr,size) \
- (cfs_find_next_zero_bit((addr),(size),0))
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
#endif
*/
/*
- * cfs_spin_lock
+ * spin_lock
*
- * - cfs_spin_lock_init(x)
- * - cfs_spin_lock(x)
- * - cfs_spin_unlock(x)
- * - cfs_spin_trylock(x)
- * - cfs_spin_lock_bh_init(x)
- * - cfs_spin_lock_bh(x)
- * - cfs_spin_unlock_bh(x)
+ * - spin_lock_init(x)
+ * - spin_lock(x)
+ * - spin_unlock(x)
+ * - spin_trylock(x)
+ * - spin_lock_bh_init(x)
+ * - spin_lock_bh(x)
+ * - spin_unlock_bh(x)
*
- * - cfs_spin_is_locked(x)
- * - cfs_spin_lock_irqsave(x, f)
- * - cfs_spin_unlock_irqrestore(x, f)
+ * - spin_is_locked(x)
+ * - spin_lock_irqsave(x, f)
+ * - spin_unlock_irqrestore(x, f)
*
* No-op implementation.
*/
-struct cfs_spin_lock {int foo;};
+struct spin_lock { int foo; };
-typedef struct cfs_spin_lock cfs_spinlock_t;
+typedef struct spin_lock spinlock_t;
-#define DEFINE_SPINLOCK(lock) cfs_spinlock_t lock = { }
-#define LASSERT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
-#define LINVRNT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
-#define LASSERT_SEM_LOCKED(sem) do {(void)sizeof(sem);} while(0)
-#define LASSERT_MUTEX_LOCKED(x) do {(void)sizeof(x);} while(0)
+#define DEFINE_SPINLOCK(lock) spinlock_t lock = { }
+#define LASSERT_SPIN_LOCKED(lock) do { (void)sizeof(lock); } while (0)
+#define LINVRNT_SPIN_LOCKED(lock) do { (void)sizeof(lock); } while (0)
+#define LASSERT_SEM_LOCKED(sem) do { (void)sizeof(sem); } while (0)
+#define LASSERT_MUTEX_LOCKED(x) do { (void)sizeof(x); } while (0)
-void cfs_spin_lock_init(cfs_spinlock_t *lock);
-void cfs_spin_lock(cfs_spinlock_t *lock);
-void cfs_spin_unlock(cfs_spinlock_t *lock);
-int cfs_spin_trylock(cfs_spinlock_t *lock);
-void cfs_spin_lock_bh_init(cfs_spinlock_t *lock);
-void cfs_spin_lock_bh(cfs_spinlock_t *lock);
-void cfs_spin_unlock_bh(cfs_spinlock_t *lock);
+void spin_lock_init(spinlock_t *lock);
+void spin_lock(spinlock_t *lock);
+void spin_unlock(spinlock_t *lock);
+int spin_trylock(spinlock_t *lock);
+void spin_lock_bh_init(spinlock_t *lock);
+void spin_lock_bh(spinlock_t *lock);
+void spin_unlock_bh(spinlock_t *lock);
-static inline int cfs_spin_is_locked(cfs_spinlock_t *l) {return 1;}
-static inline void cfs_spin_lock_irqsave(cfs_spinlock_t *l, unsigned long f){}
-static inline void cfs_spin_unlock_irqrestore(cfs_spinlock_t *l,
- unsigned long f){}
+static inline int spin_is_locked(spinlock_t *l) { return 1; }
+static inline void spin_lock_irqsave(spinlock_t *l, unsigned long f) {}
+static inline void spin_unlock_irqrestore(spinlock_t *l, unsigned long f) {}
/*
* Semaphore
*
- * - cfs_sema_init(x, v)
+ * - sema_init(x, v)
* - __down(x)
* - __up(x)
*/
-typedef struct cfs_semaphore {
- int foo;
-} cfs_semaphore_t;
+struct semaphore {
+ int foo;
+};
-void cfs_sema_init(cfs_semaphore_t *s, int val);
-void __up(cfs_semaphore_t *s);
-void __down(cfs_semaphore_t *s);
-int __down_interruptible(cfs_semaphore_t *s);
+void sema_init(struct semaphore *s, int val);
+void __up(struct semaphore *s);
+void __down(struct semaphore *s);
+int __down_interruptible(struct semaphore *s);
-#define CFS_DEFINE_SEMAPHORE(name) cfs_semaphore_t name = { 1 }
+#define DEFINE_SEMAPHORE(name) struct semaphore name = { 1 }
-#define cfs_up(s) __up(s)
-#define cfs_down(s) __down(s)
-#define cfs_down_interruptible(s) __down_interruptible(s)
+#define up(s) __up(s)
+#define down(s) __down(s)
+#define down_interruptible(s) __down_interruptible(s)
-static inline int cfs_down_trylock(cfs_semaphore_t *sem)
+static inline int down_trylock(struct semaphore *sem)
{
return 0;
}
/*
* Completion:
*
- * - cfs_init_completion_module(c)
- * - cfs_call_wait_handler(t)
- * - cfs_init_completion(c)
- * - cfs_complete(c)
- * - cfs_wait_for_completion(c)
- * - cfs_wait_for_completion_interruptible(c)
+ * - init_completion_module(c)
+ * - call_wait_handler(t)
+ * - init_completion(c)
+ * - complete(c)
+ * - wait_for_completion(c)
+ * - wait_for_completion_interruptible(c)
*/
-typedef struct {
- unsigned int done;
- cfs_waitq_t wait;
-} cfs_completion_t;
+struct completion {
+ unsigned int done;
+ cfs_waitq_t wait;
+};
-typedef int (*cfs_wait_handler_t) (int timeout);
-void cfs_init_completion_module(cfs_wait_handler_t handler);
-int cfs_call_wait_handler(int timeout);
-void cfs_init_completion(cfs_completion_t *c);
-void cfs_complete(cfs_completion_t *c);
-void cfs_wait_for_completion(cfs_completion_t *c);
-int cfs_wait_for_completion_interruptible(cfs_completion_t *c);
+typedef int (*wait_handler_t) (int timeout);
+void init_completion_module(wait_handler_t handler);
+int call_wait_handler(int timeout);
+void init_completion(struct completion *c);
+void complete(struct completion *c);
+void wait_for_completion(struct completion *c);
+int wait_for_completion_interruptible(struct completion *c);
-#define CFS_COMPLETION_INITIALIZER(work) \
- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+#define COMPLETION_INITIALIZER(work) \
+ { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
-#define CFS_DECLARE_COMPLETION(work) \
- cfs_completion_t work = CFS_COMPLETION_INITIALIZER(work)
-#define CFS_INIT_COMPLETION(x) ((x).done = 0)
+#define INIT_COMPLETION(x) ((x).done = 0)
/*
- * cfs_rw_semaphore:
+ * rw_semaphore:
*
- * - cfs_init_rwsem(x)
- * - cfs_down_read(x)
- * - cfs_down_read_trylock(x)
- * - cfs_down_write(struct cfs_rw_semaphore *s);
- * - cfs_down_write_trylock(struct cfs_rw_semaphore *s);
- * - cfs_up_read(x)
- * - cfs_up_write(x)
- * - cfs_fini_rwsem(x)
+ * - init_rwsem(x)
+ * - down_read(x)
+ * - down_read_trylock(x)
+ * - down_write(struct rw_semaphore *s);
+ * - down_write_trylock(struct rw_semaphore *s);
+ * - up_read(x)
+ * - up_write(x)
+ * - fini_rwsem(x)
*/
-typedef struct cfs_rw_semaphore {
- int foo;
-} cfs_rw_semaphore_t;
-
-void cfs_init_rwsem(cfs_rw_semaphore_t *s);
-void cfs_down_read(cfs_rw_semaphore_t *s);
-int cfs_down_read_trylock(cfs_rw_semaphore_t *s);
-void cfs_down_write(cfs_rw_semaphore_t *s);
-int cfs_down_write_trylock(cfs_rw_semaphore_t *s);
-void cfs_up_read(cfs_rw_semaphore_t *s);
-void cfs_up_write(cfs_rw_semaphore_t *s);
-void cfs_fini_rwsem(cfs_rw_semaphore_t *s);
-#define CFS_DECLARE_RWSEM(name) cfs_rw_semaphore_t name = { }
+struct rw_semaphore {
+ int foo;
+};
+
+void init_rwsem(struct rw_semaphore *s);
+void down_read(struct rw_semaphore *s);
+int down_read_trylock(struct rw_semaphore *s);
+void down_write(struct rw_semaphore *s);
+int down_write_trylock(struct rw_semaphore *s);
+void up_read(struct rw_semaphore *s);
+void up_write(struct rw_semaphore *s);
+void fini_rwsem(struct rw_semaphore *s);
+#define DECLARE_RWSEM(name) struct rw_semaphore name = { }
/*
* read-write lock : Need to be investigated more!!
* XXX nikita: for now, let rwlock_t to be identical to rw_semaphore
*
- * - cfs_rwlock_init(x)
- * - cfs_read_lock(x)
- * - cfs_read_unlock(x)
- * - cfs_write_lock(x)
- * - cfs_write_unlock(x)
- * - cfs_write_lock_irqsave(x)
- * - cfs_write_unlock_irqrestore(x)
- * - cfs_read_lock_irqsave(x)
- * - cfs_read_unlock_irqrestore(x)
+ * - rwlock_init(x)
+ * - read_lock(x)
+ * - read_unlock(x)
+ * - write_lock(x)
+ * - write_unlock(x)
+ * - write_lock_irqsave(x)
+ * - write_unlock_irqrestore(x)
+ * - read_lock_irqsave(x)
+ * - read_unlock_irqrestore(x)
*/
-typedef cfs_rw_semaphore_t cfs_rwlock_t;
-#define DEFINE_RWLOCK(lock) cfs_rwlock_t lock = { }
+#define rwlock_t struct rw_semaphore
+#define DEFINE_RWLOCK(lock) rwlock_t lock = { }
+
+#define rwlock_init(pl) init_rwsem(pl)
-#define cfs_rwlock_init(pl) cfs_init_rwsem(pl)
+#define read_lock(l) down_read(l)
+#define read_unlock(l) up_read(l)
+#define write_lock(l) down_write(l)
+#define write_unlock(l) up_write(l)
+
+static inline void write_lock_irqsave(rwlock_t *l, unsigned long f)
+{
+ write_lock(l);
+}
-#define cfs_read_lock(l) cfs_down_read(l)
-#define cfs_read_unlock(l) cfs_up_read(l)
-#define cfs_write_lock(l) cfs_down_write(l)
-#define cfs_write_unlock(l) cfs_up_write(l)
+static inline void write_unlock_irqrestore(rwlock_t *l, unsigned long f)
+{
+ write_unlock(l);
+}
-static inline void
-cfs_write_lock_irqsave(cfs_rwlock_t *l, unsigned long f) { cfs_write_lock(l); }
-static inline void
-cfs_write_unlock_irqrestore(cfs_rwlock_t *l, unsigned long f) { cfs_write_unlock(l); }
+static inline void read_lock_irqsave(rwlock_t *l, unsigned long f)
+{
+ read_lock(l);
+}
-static inline void
-cfs_read_lock_irqsave(cfs_rwlock_t *l, unsigned long f) { cfs_read_lock(l); }
-static inline void
-cfs_read_unlock_irqrestore(cfs_rwlock_t *l, unsigned long f) { cfs_read_unlock(l); }
+static inline void read_unlock_irqrestore(rwlock_t *l, unsigned long f)
+{
+ read_unlock(l);
+}
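+
+/* In this user-space emulation there are no interrupts to mask, so the
+ * *_irqsave/*_irqrestore variants above just take or release the lock and
+ * ignore the flags argument. */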
/*
* Atomic for single-threaded user-space
int c_done;
pthread_cond_t c_cond;
pthread_mutex_t c_mut;
-} cfs_mt_completion_t;
+} mt_completion_t;
-void cfs_mt_init_completion(cfs_mt_completion_t *c);
-void cfs_mt_fini_completion(cfs_mt_completion_t *c);
-void cfs_mt_complete(cfs_mt_completion_t *c);
-void cfs_mt_wait_for_completion(cfs_mt_completion_t *c);
+void mt_init_completion(mt_completion_t *c);
+void mt_fini_completion(mt_completion_t *c);
+void mt_complete(mt_completion_t *c);
+void mt_wait_for_completion(mt_completion_t *c);
/*
* Multi-threaded user space atomic APIs
*/
-typedef struct { volatile int counter; } cfs_mt_atomic_t;
+typedef struct { volatile int counter; } mt_atomic_t;
-int cfs_mt_atomic_read(cfs_mt_atomic_t *a);
-void cfs_mt_atomic_set(cfs_mt_atomic_t *a, int b);
-int cfs_mt_atomic_dec_and_test(cfs_mt_atomic_t *a);
-void cfs_mt_atomic_inc(cfs_mt_atomic_t *a);
-void cfs_mt_atomic_dec(cfs_mt_atomic_t *a);
-void cfs_mt_atomic_add(int b, cfs_mt_atomic_t *a);
-void cfs_mt_atomic_sub(int b, cfs_mt_atomic_t *a);
+int mt_atomic_read(mt_atomic_t *a);
+void mt_atomic_set(mt_atomic_t *a, int b);
+int mt_atomic_dec_and_test(mt_atomic_t *a);
+void mt_atomic_inc(mt_atomic_t *a);
+void mt_atomic_dec(mt_atomic_t *a);
+void mt_atomic_add(int b, mt_atomic_t *a);
+void mt_atomic_sub(int b, mt_atomic_t *a);
#endif /* HAVE_LIBPTHREAD */
* Mutex interface.
*
**************************************************************************/
-typedef struct cfs_semaphore cfs_mutex_t;
+#define mutex semaphore
-#define CFS_DEFINE_MUTEX(m) CFS_DEFINE_SEMAPHORE(m)
+#define DEFINE_MUTEX(m) DEFINE_SEMAPHORE(m)
-static inline void cfs_mutex_init(cfs_mutex_t *mutex)
+static inline void mutex_init(struct mutex *mutex)
{
- cfs_sema_init(mutex, 1);
+ sema_init(mutex, 1);
}
-static inline void cfs_mutex_lock(cfs_mutex_t *mutex)
+static inline void mutex_lock(struct mutex *mutex)
{
- cfs_down(mutex);
+ down(mutex);
}
-static inline void cfs_mutex_unlock(cfs_mutex_t *mutex)
+static inline void mutex_unlock(struct mutex *mutex)
{
- cfs_up(mutex);
+ up(mutex);
}
-static inline int cfs_mutex_lock_interruptible(cfs_mutex_t *mutex)
+static inline int mutex_lock_interruptible(struct mutex *mutex)
{
- return cfs_down_interruptible(mutex);
+ return down_interruptible(mutex);
}
/**
* \retval 1 try-lock succeeded (lock acquired).
* \retval 0 indicates lock contention.
*/
-static inline int cfs_mutex_trylock(cfs_mutex_t *mutex)
+static inline int mutex_trylock(struct mutex *mutex)
{
- return !cfs_down_trylock(mutex);
+ return !down_trylock(mutex);
}
-static inline void cfs_mutex_destroy(cfs_mutex_t *lock)
+static inline void mutex_destroy(struct mutex *lock)
{
}
*
* \retval 0 mutex is not locked. This should never happen.
*/
-static inline int cfs_mutex_is_locked(cfs_mutex_t *lock)
+static inline int mutex_is_locked(struct mutex *lock)
{
return 1;
}
*
**************************************************************************/
-typedef struct cfs_lock_class_key {
+struct lock_class_key {
int foo;
-} cfs_lock_class_key_t;
+};
-static inline void cfs_lockdep_set_class(void *lock,
- cfs_lock_class_key_t *key)
+static inline void lockdep_set_class(void *lock, struct lock_class_key *key)
{
}
-static inline void cfs_lockdep_off(void)
+static inline void lockdep_off(void)
{
}
-static inline void cfs_lockdep_on(void)
+static inline void lockdep_on(void)
{
}
-#define cfs_mutex_lock_nested(mutex, subclass) cfs_mutex_lock(mutex)
-#define cfs_spin_lock_nested(lock, subclass) cfs_spin_lock(lock)
-#define cfs_down_read_nested(lock, subclass) cfs_down_read(lock)
-#define cfs_down_write_nested(lock, subclass) cfs_down_write(lock)
+#define mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
+#define spin_lock_nested(lock, subclass) spin_lock(lock)
+#define down_read_nested(lock, subclass) down_read(lock)
+#define down_write_nested(lock, subclass) down_write(lock)
/* !__KERNEL__ */
unsigned long simple_strtoul(const char *cp,char **endp, unsigned int base);
-static inline int cfs_set_bit(int nr, void * addr)
+static inline int set_bit(int nr, void * addr)
{
(((volatile ULONG *) addr)[nr >> 5]) |= (1UL << (nr & 31));
return *((int *) addr);
}
-static inline int cfs_test_bit(int nr, void * addr)
+static inline int test_bit(int nr, void * addr)
{
return (int)(((1UL << (nr & 31)) & (((volatile ULONG *) addr)[nr >> 5])) != 0);
}
-static inline int cfs_clear_bit(int nr, void * addr)
+static inline int clear_bit(int nr, void * addr)
{
(((volatile ULONG *) addr)[nr >> 5]) &= (~(1UL << (nr & 31)));
return *((int *) addr);
}
-static inline int cfs_test_and_set_bit(int nr, volatile void *addr)
+static inline int test_and_set_bit(int nr, volatile void *addr)
{
int rc;
unsigned char mask;
return rc;
}
-#define ext2_set_bit(nr,addr) (cfs_set_bit(nr, addr), 0)
-#define ext2_clear_bit(nr,addr) (cfs_clear_bit(nr, addr), 0)
-#define ext2_test_bit(nr,addr) cfs_test_bit(nr, addr)
+#define ext2_set_bit(nr, addr) (set_bit(nr, addr), 0)
+#define ext2_clear_bit(nr, addr) (clear_bit(nr, addr), 0)
+#define ext2_test_bit(nr, addr) test_bit(nr, addr)
-static inline int cfs_ffs(int x)
+static inline int ffs(int x)
{
int r = 1;
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
static inline
-int cfs_fls(int x)
+int fls(int x)
{
int r = 32;
return r;
}
-static inline unsigned cfs_find_first_bit(const unsigned long *addr,
+static inline unsigned find_first_bit(const unsigned long *addr,
unsigned size)
{
unsigned x = 0;
int i_uid;
int i_gid;
__u32 i_flags;
- cfs_mutex_t i_sem;
+ struct mutex i_sem;
void * i_priv;
};
* spinlock & event definitions
*/
-typedef struct cfs_spin_lock cfs_spinlock_t;
+typedef struct spin_lock spinlock_t;
/* atomic */
#define cfs_atomic_inc_return(v) cfs_atomic_add_return(1, v)
#define cfs_atomic_dec_return(v) cfs_atomic_sub_return(1, v)
-int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, cfs_spinlock_t *lock);
+int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, spinlock_t *lock);
/* event */
*
*/
-struct cfs_spin_lock {
- KSPIN_LOCK lock;
- KIRQL irql;
+struct spin_lock {
+ KSPIN_LOCK lock;
+ KIRQL irql;
};
-#define CFS_DECL_SPIN(name) cfs_spinlock_t name;
-#define CFS_DECL_SPIN_EXTERN(name) extern cfs_spinlock_t name;
+#define CFS_DECL_SPIN(name) spinlock_t name;
+#define CFS_DECL_SPIN_EXTERN(name) extern spinlock_t name;
#define DEFINE_SPINLOCK {0}
-static inline void cfs_spin_lock_init(cfs_spinlock_t *lock)
+static inline void spin_lock_init(spinlock_t *lock)
{
- KeInitializeSpinLock(&(lock->lock));
+ KeInitializeSpinLock(&(lock->lock));
}
-static inline void cfs_spin_lock(cfs_spinlock_t *lock)
+static inline void spin_lock(spinlock_t *lock)
{
- KeAcquireSpinLock(&(lock->lock), &(lock->irql));
+ KeAcquireSpinLock(&(lock->lock), &(lock->irql));
}
-static inline void cfs_spin_lock_nested(cfs_spinlock_t *lock, unsigned subclass)
+static inline void spin_lock_nested(spinlock_t *lock, unsigned subclass)
{
- KeAcquireSpinLock(&(lock->lock), &(lock->irql));
+ KeAcquireSpinLock(&(lock->lock), &(lock->irql));
}
-static inline void cfs_spin_unlock(cfs_spinlock_t *lock)
+static inline void spin_unlock(spinlock_t *lock)
{
- KIRQL irql = lock->irql;
- KeReleaseSpinLock(&(lock->lock), irql);
+ KIRQL irql = lock->irql;
+ KeReleaseSpinLock(&(lock->lock), irql);
}
-#define cfs_spin_lock_irqsave(lock, flags) \
-do {(flags) = 0; cfs_spin_lock(lock);} while(0)
+#define spin_lock_irqsave(lock, flags) \
+ do { (flags) = 0; spin_lock(lock); } while (0)
-#define cfs_spin_unlock_irqrestore(lock, flags) \
-do {cfs_spin_unlock(lock);} while(0)
+#define spin_unlock_irqrestore(lock, flags) \
+ do { spin_unlock(lock); } while (0)
/* There's no corresponding routine in windows kernel.
extern int libcfs_mp_system;
-static int cfs_spin_trylock(cfs_spinlock_t *lock)
+static int spin_trylock(spinlock_t *lock)
{
- KIRQL Irql;
- int rc = 0;
+ KIRQL Irql;
+ int rc = 0;
- ASSERT(lock != NULL);
+ ASSERT(lock != NULL);
- KeRaiseIrql(DISPATCH_LEVEL, &Irql);
+ KeRaiseIrql(DISPATCH_LEVEL, &Irql);
- if (libcfs_mp_system) {
- if (0 == (ulong_ptr_t)lock->lock) {
+ if (libcfs_mp_system) {
+ if (0 == (ulong_ptr_t)lock->lock) {
#if _X86_
- __asm {
- mov edx, dword ptr [ebp + 8]
- lock bts dword ptr[edx], 0
- jb lock_failed
- mov rc, TRUE
- lock_failed:
- }
+ __asm {
+ mov edx, dword ptr [ebp + 8]
+ lock bts dword ptr[edx], 0
+ jb lock_failed
+ mov rc, TRUE
+ lock_failed:
+ }
#else
- KdBreakPoint();
+ KdBreakPoint();
#endif
- }
- } else {
- rc = TRUE;
- }
+ }
+ } else {
+ rc = TRUE;
+ }
- if (rc) {
- lock->irql = Irql;
- } else {
- KeLowerIrql(Irql);
- }
+ if (rc) {
+ lock->irql = Irql;
+ } else {
+ KeLowerIrql(Irql);
+ }
- return rc;
+ return rc;
}
-static int cfs_spin_is_locked(cfs_spinlock_t *lock)
+static int spin_is_locked(spinlock_t *lock)
{
#if _WIN32_WINNT >= 0x502
- /* KeTestSpinLock only avalilable on 2k3 server or later */
- return (!KeTestSpinLock(&lock->lock));
+	/* KeTestSpinLock only available on 2k3 server or later */
+ return !KeTestSpinLock(&lock->lock);
#else
- return (int) (lock->lock);
+ return (int) (lock->lock);
#endif
}
/* synchronization between cpus: it will disable all DPCs
kernel task scheduler on the CPU */
-#define cfs_spin_lock_bh(x) cfs_spin_lock(x)
-#define cfs_spin_unlock_bh(x) cfs_spin_unlock(x)
-#define cfs_spin_lock_bh_init(x) cfs_spin_lock_init(x)
+#define spin_lock_bh(x) spin_lock(x)
+#define spin_unlock_bh(x) spin_unlock(x)
+#define spin_lock_bh_init(x) spin_lock_init(x)
/*
- * cfs_rw_semaphore (using ERESOURCE)
+ * rw_semaphore (using ERESOURCE)
*/
-typedef struct cfs_rw_semaphore {
- ERESOURCE rwsem;
-} cfs_rw_semaphore_t;
+struct rw_semaphore {
+ ERESOURCE rwsem;
+};
-#define CFS_DECLARE_RWSEM(name) cfs_rw_semaphore_t name
-#define CFS_DECLARE_RWSEM_EXTERN(name) extern cfs_rw_semaphore_t name
+#define DECLARE_RWSEM(name) struct rw_semaphore name
+#define CFS_DECLARE_RWSEM_EXTERN(name) extern struct rw_semaphore name
/*
- * cfs_init_rwsem
- * To initialize the the cfs_rw_semaphore_t structure
+ * init_rwsem
+ * To initialize the rw_semaphore structure
*
* Arguments:
- * rwsem: pointer to the cfs_rw_semaphore_t structure
+ * rwsem: pointer to the rw_semaphore structure
*
* Return Value:
* N/A
* N/A
*/
-static inline void cfs_init_rwsem(cfs_rw_semaphore_t *s)
+static inline void init_rwsem(struct rw_semaphore *s)
{
ExInitializeResourceLite(&s->rwsem);
}
-#define rwsem_init cfs_init_rwsem
+#define rwsem_init init_rwsem
/*
- * cfs_fini_rwsem
- * To finilize/destroy the the cfs_rw_semaphore_t structure
+ * fini_rwsem
+ * To finalize/destroy the rw_semaphore structure
*
* Arguments:
- * rwsem: pointer to the cfs_rw_semaphore_t structure
+ * rwsem: pointer to the rw_semaphore structure
*
* Return Value:
* N/A
* Just define it NULL for other systems.
*/
-static inline void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
+static inline void fini_rwsem(struct rw_semaphore *s)
{
- ExDeleteResourceLite(&s->rwsem);
+ ExDeleteResourceLite(&s->rwsem);
}
/*
- * cfs_down_read
- * To acquire read-lock of the cfs_rw_semaphore
+ * down_read
+ * To acquire read-lock of the rw_semaphore
*
* Arguments:
- * rwsem: pointer to the cfs_rw_semaphore_t structure
+ * rwsem: pointer to the struct rw_semaphore
*
* Return Value:
* N/A
* N/A
*/
-static inline void cfs_down_read(cfs_rw_semaphore_t *s)
+static inline void down_read(struct rw_semaphore *s)
{
ExAcquireResourceSharedLite(&s->rwsem, TRUE);
}
-#define cfs_down_read_nested cfs_down_read
+#define down_read_nested down_read
/*
- * cfs_down_read_trylock
- * To acquire read-lock of the cfs_rw_semaphore without blocking
+ * down_read_trylock
+ * To acquire read-lock of the rw_semaphore without blocking
*
* Arguments:
- * rwsem: pointer to the cfs_rw_semaphore_t structure
+ * rwsem: pointer to the struct rw_semaphore
*
* Return Value:
* Zero: failed to acquire the read lock
* This routine will return immediately without waiting.
*/
-static inline int cfs_down_read_trylock(cfs_rw_semaphore_t *s)
+static inline int down_read_trylock(struct rw_semaphore *s)
{
return ExAcquireResourceSharedLite(&s->rwsem, FALSE);
}
/*
- * cfs_down_write
- * To acquire write-lock of the cfs_rw_semaphore
+ * down_write
+ * To acquire write-lock of the struct rw_semaphore
*
* Arguments:
- * rwsem: pointer to the cfs_rw_semaphore_t structure
+ * rwsem: pointer to the struct rw_semaphore
*
* Return Value:
* N/A
* N/A
*/
-static inline void cfs_down_write(cfs_rw_semaphore_t *s)
+static inline void down_write(struct rw_semaphore *s)
{
ExAcquireResourceExclusiveLite(&(s->rwsem), TRUE);
}
-#define cfs_down_write_nested cfs_down_write
+#define down_write_nested down_write
/*
* down_write_trylock
- * To acquire write-lock of the cfs_rw_semaphore without blocking
+ * To acquire write-lock of the rw_semaphore without blocking
*
* Arguments:
- * rwsem: pointer to the cfs_rw_semaphore_t structure
+ * rwsem: pointer to the struct rw_semaphore
*
* Return Value:
* Zero: failed to acquire the write lock
* This routine will return immediately without waiting.
*/
-static inline int cfs_down_write_trylock(cfs_rw_semaphore_t *s)
+static inline int down_write_trylock(struct rw_semaphore *s)
{
- return ExAcquireResourceExclusiveLite(&(s->rwsem), FALSE);
+ return ExAcquireResourceExclusiveLite(&(s->rwsem), FALSE);
}
/*
- * cfs_up_read
- * To release read-lock of the cfs_rw_semaphore
+ * up_read
+ * To release read-lock of the rw_semaphore
*
* Arguments:
- * rwsem: pointer to the cfs_rw_semaphore_t structure
+ * rwsem: pointer to the struct rw_semaphore
*
* Return Value:
* N/A
* N/A
*/
-static inline void cfs_up_read(cfs_rw_semaphore_t *s)
+static inline void up_read(struct rw_semaphore *s)
{
- ExReleaseResourceForThreadLite(
- &(s->rwsem),
- ExGetCurrentResourceThread());
+ ExReleaseResourceForThreadLite(&(s->rwsem),
+ ExGetCurrentResourceThread());
}
/*
- * cfs_up_write
- * To release write-lock of the cfs_rw_semaphore
+ * up_write
+ * To release write-lock of the rw_semaphore
*
* Arguments:
- * rwsem: pointer to the cfs_rw_semaphore_t structure
+ * rwsem: pointer to the struct rw_semaphore
*
* Return Value:
* N/A
* N/A
*/
-static inline void cfs_up_write(cfs_rw_semaphore_t *s)
+static inline void up_write(struct rw_semaphore *s)
{
- ExReleaseResourceForThreadLite(
- &(s->rwsem),
- ExGetCurrentResourceThread());
+ ExReleaseResourceForThreadLite(&(s->rwsem),
+ ExGetCurrentResourceThread());
}
/*
*/
typedef struct {
- cfs_spinlock_t guard;
- int count;
-} cfs_rwlock_t;
+ spinlock_t guard;
+ int count;
+} rwlock_t;
-void cfs_rwlock_init(cfs_rwlock_t * rwlock);
-void cfs_rwlock_fini(cfs_rwlock_t * rwlock);
+void rwlock_init(rwlock_t *rwlock);
+void cfs_rwlock_fini(rwlock_t *rwlock);
-void cfs_read_lock(cfs_rwlock_t * rwlock);
-void cfs_read_unlock(cfs_rwlock_t * rwlock);
-void cfs_write_lock(cfs_rwlock_t * rwlock);
-void cfs_write_unlock(cfs_rwlock_t * rwlock);
+void read_lock(rwlock_t *rwlock);
+void read_unlock(rwlock_t *rwlock);
+void write_lock(rwlock_t *rwlock);
+void write_unlock(rwlock_t *rwlock);
-#define cfs_write_lock_irqsave(l, f) do {f = 0; cfs_write_lock(l);} while(0)
-#define cfs_write_unlock_irqrestore(l, f) do {cfs_write_unlock(l);} while(0)
-#define cfs_read_lock_irqsave(l, f do {f=0; cfs_read_lock(l);} while(0)
-#define cfs_read_unlock_irqrestore(l, f) do {cfs_read_unlock(l);} while(0)
+#define write_lock_irqsave(l, f) do { f = 0; write_lock(l); } while (0)
+#define write_unlock_irqrestore(l, f) do { write_unlock(l); } while (0)
+#define read_lock_irqsave(l, f) do { f = 0; read_lock(l); } while (0)
+#define read_unlock_irqrestore(l, f) do { read_unlock(l); } while (0)
-#define cfs_write_lock_bh cfs_write_lock
-#define cfs_write_unlock_bh cfs_write_unlock
+#define write_lock_bh write_lock
+#define write_unlock_bh write_unlock
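/*
 * Usage sketch (editor's illustration, not part of the patch): the _irqsave
 * variants above only zero the flags argument on this port, but callers keep
 * the usual Linux calling convention.  "foo_lock", "foo_count" and foo_inc()
 * are hypothetical.
 */
static rwlock_t foo_lock;		/* rwlock_init(&foo_lock) done elsewhere */
static int foo_count;

static void foo_inc(void)
{
	unsigned long flags;

	write_lock_irqsave(&foo_lock, flags);
	foo_count++;
	write_unlock_irqrestore(&foo_lock, flags);
}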
-typedef struct cfs_lock_class_key {
- int foo;
-} cfs_lock_class_key_t;
+struct lock_class_key {
+ int foo;
+};
-#define cfs_lockdep_set_class(lock, class) do {} while(0)
+#define lockdep_set_class(lock, class) do {} while (0)
-static inline void cfs_lockdep_off(void)
+static inline void lockdep_off(void)
{
}
-static inline void cfs_lockdep_on(void)
+static inline void lockdep_on(void)
{
}
* - __up(x)
*/
-typedef struct cfs_semaphore {
+struct semaphore {
KSEMAPHORE sem;
-} cfs_semaphore_t;
+};
-static inline void cfs_sema_init(cfs_semaphore_t *s, int val)
+static inline void sema_init(struct semaphore *s, int val)
{
KeInitializeSemaphore(&s->sem, val, val);
}
-static inline void __down(cfs_semaphore_t *s)
+static inline void __down(struct semaphore *s)
{
- KeWaitForSingleObject( &(s->sem), Executive,
- KernelMode, FALSE, NULL );
+ KeWaitForSingleObject(&(s->sem), Executive, KernelMode, FALSE, NULL);
}
-static inline void __up(cfs_semaphore_t *s)
+static inline void __up(struct semaphore *s)
{
KeReleaseSemaphore(&s->sem, 0, 1, FALSE);
}
-static inline int down_trylock(cfs_semaphore_t *s)
+static inline int down_trylock(struct semaphore *s)
{
- LARGE_INTEGER timeout = {0};
- NTSTATUS status =
- KeWaitForSingleObject( &(s->sem), Executive,
- KernelMode, FALSE, &timeout);
+ LARGE_INTEGER timeout = {0};
+ NTSTATUS status = KeWaitForSingleObject(&(s->sem), Executive,
+ KernelMode, FALSE, &timeout);
- if (status == STATUS_SUCCESS) {
- return 0;
- }
+ if (status == STATUS_SUCCESS)
+ return 0;
- return 1;
+ return 1;
}
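/*
 * Usage sketch (editor's illustration, not part of the patch): down_trylock()
 * keeps the Linux convention of returning 0 on success and non-zero when the
 * semaphore could not be taken.  "foo_gate" and foo_try() are hypothetical.
 */
static struct semaphore foo_gate;	/* sema_init(&foo_gate, 1) done elsewhere */

static int foo_try(void)
{
	if (down_trylock(&foo_gate))	/* non-zero: not acquired */
		return 0;		/* caller retries later */
	/* ... short critical section ... */
	__up(&foo_gate);
	return 1;
}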
/*
* - mutex_down(x)
*/
-typedef struct cfs_semaphore cfs_mutex_t;
+#define mutex semaphore
-#define CFS_DECLARE_MUTEX(x) cfs_mutex_t x
+#define CFS_DECLARE_MUTEX(x) struct mutex x
/*
* init_mutex
* Notes:
* N/A
*/
-#define cfs_mutex_init cfs_init_mutex
-static inline void cfs_init_mutex(cfs_mutex_t *mutex)
+#define mutex_init cfs_init_mutex
+static inline void cfs_init_mutex(struct mutex *mutex)
{
- cfs_sema_init(mutex, 1);
+ sema_init(mutex, 1);
}
/*
* N/A
*/
-static inline void cfs_mutex_down(cfs_mutex_t *mutex)
+static inline void cfs_mutex_down(struct mutex *mutex)
{
- __down(mutex);
+ __down(mutex);
}
-static inline int cfs_mutex_down_interruptible(cfs_mutex_t *mutex)
+static inline int cfs_mutex_down_interruptible(struct mutex *mutex)
{
- __down(mutex);
- return 0;
+ __down(mutex);
+ return 0;
}
-#define cfs_mutex_lock(m) cfs_mutex_down(m)
-#define cfs_mutex_trylock(s) down_trylock(s)
-#define cfs_mutex_lock_nested(m) cfs_mutex_down(m)
-#define cfs_down(m) cfs_mutex_down(m)
-#define cfs_down_interruptible(m) cfs_mutex_down_interruptible(m)
+#define mutex_lock(m) cfs_mutex_down(m)
+#define mutex_trylock(s) down_trylock(s)
+#define mutex_lock_nested(m) cfs_mutex_down(m)
+#define down(m) cfs_mutex_down(m)
+#define down_interruptible(m) cfs_mutex_down_interruptible(m)
/*
* mutex_up
* N/A
*/
-static inline void cfs_mutex_up(cfs_mutex_t *mutex)
+static inline void cfs_mutex_up(struct mutex *mutex)
{
- __up(mutex);
+ __up(mutex);
}
-#define cfs_mutex_unlock(m) cfs_mutex_up(m)
-#define cfs_up(m) cfs_mutex_up(m)
+#define mutex_unlock(m) cfs_mutex_up(m)
+#define up(m) cfs_mutex_up(m)
/*
* init_mutex_locked
* N/A
*/
-static inline void cfs_init_mutex_locked(cfs_mutex_t *mutex)
+static inline void cfs_init_mutex_locked(struct mutex *mutex)
{
- cfs_init_mutex(mutex);
- cfs_mutex_down(mutex);
+ cfs_init_mutex(mutex);
+ cfs_mutex_down(mutex);
}
-static inline void cfs_mutex_destroy(cfs_mutex_t *mutex)
+static inline void mutex_destroy(struct mutex *mutex)
{
}
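/*
 * Usage sketch (editor's illustration, not part of the patch): with the
 * aliases above a mutex is just a binary semaphore, so mutex_lock() and
 * mutex_unlock() resolve to __down() and __up().  "foo_mutex", foo_setup()
 * and foo_serialised() are hypothetical.
 */
static struct mutex foo_mutex;		/* "struct mutex" is struct semaphore here */

static void foo_setup(void)
{
	mutex_init(&foo_mutex);		/* cfs_init_mutex(): sema_init(m, 1) */
}

static void foo_serialised(void)
{
	mutex_lock(&foo_mutex);		/* cfs_mutex_down() -> __down() */
	/* ... critical section ... */
	mutex_unlock(&foo_mutex);	/* cfs_mutex_up() -> __up() */
}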
* - wait_for_completion(c)
*/
-typedef struct {
+struct completion {
event_t event;
-} cfs_completion_t;
+};
/*
* N/A
*/
-static inline void cfs_init_completion(cfs_completion_t *c)
+static inline void init_completion(struct completion *c)
{
cfs_init_event(&(c->event), 1, FALSE);
}
* N/A
*/
-static inline void cfs_complete(cfs_completion_t *c)
+static inline void complete(struct completion *c)
{
cfs_wake_event(&(c->event));
}
* N/A
*/
-static inline void cfs_wait_for_completion(cfs_completion_t *c)
+static inline void wait_for_completion(struct completion *c)
{
- cfs_wait_event_internal(&(c->event), 0);
+ cfs_wait_event_internal(&(c->event), 0);
}
-static inline int cfs_wait_for_completion_interruptible(cfs_completion_t *c)
+static inline int wait_for_completion_interruptible(struct completion *c)
{
- cfs_wait_event_internal(&(c->event), 0);
- return 0;
+ cfs_wait_event_internal(&(c->event), 0);
+ return 0;
}
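/*
 * Usage sketch (editor's illustration, not part of the patch): the
 * event-backed completion keeps the Linux API, so a waiter/signaller pair
 * is unchanged.  "foo_done", foo_worker() and foo_start() are hypothetical.
 */
static struct completion foo_done;

static int foo_worker(void *arg)
{
	/* ... do the work ... */
	complete(&foo_done);			/* cfs_wake_event() */
	return 0;
}

static void foo_start(void)
{
	init_completion(&foo_done);		/* cfs_init_event() */
	/* ... spawn foo_worker() ... */
	wait_for_completion(&foo_done);		/* cfs_wait_event_internal() */
}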
-#else /* !__KERNEL__ */
#endif /* !__KERNEL__ */
#endif
/* Make it prettier to test the above... */
#define UnlockPage(page) unlock_page(page)
-#define Page_Uptodate(page) cfs_test_bit(PG_uptodate, &(page)->flags)
-#define SetPageUptodate(page) \
+#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
+#define SetPageUptodate(page) \
do { \
arch_set_page_uptodate(page); \
- cfs_set_bit(PG_uptodate, &(page)->flags); \
+ set_bit(PG_uptodate, &(page)->flags); \
} while (0)
-#define ClearPageUptodate(page) cfs_clear_bit(PG_uptodate, &(page)->flags)
-#define PageDirty(page) cfs_test_bit(PG_dirty, &(page)->flags)
-#define SetPageDirty(page) cfs_set_bit(PG_dirty, &(page)->flags)
-#define ClearPageDirty(page) cfs_clear_bit(PG_dirty, &(page)->flags)
-#define PageLocked(page) cfs_test_bit(PG_locked, &(page)->flags)
-#define LockPage(page) cfs_set_bit(PG_locked, &(page)->flags)
-#define TryLockPage(page) cfs_test_and_set_bit(PG_locked, &(page)->flags)
-#define PageChecked(page) cfs_test_bit(PG_checked, &(page)->flags)
-#define SetPageChecked(page) cfs_set_bit(PG_checked, &(page)->flags)
-#define ClearPageChecked(page) cfs_clear_bit(PG_checked, &(page)->flags)
-#define PageLaunder(page) cfs_test_bit(PG_launder, &(page)->flags)
-#define SetPageLaunder(page) cfs_set_bit(PG_launder, &(page)->flags)
-#define ClearPageLaunder(page) cfs_clear_bit(PG_launder, &(page)->flags)
-#define ClearPageArch1(page) cfs_clear_bit(PG_arch_1, &(page)->flags)
-
-#define PageError(page) cfs_test_bit(PG_error, &(page)->flags)
-#define SetPageError(page) cfs_set_bit(PG_error, &(page)->flags)
-#define ClearPageError(page) cfs_clear_bit(PG_error, &(page)->flags)
-#define PageReferenced(page) cfs_test_bit(PG_referenced, &(page)->flags)
-#define SetPageReferenced(page) cfs_set_bit(PG_referenced, &(page)->flags)
-#define ClearPageReferenced(page) cfs_clear_bit(PG_referenced, &(page)->flags)
-
-#define PageActive(page) cfs_test_bit(PG_active, &(page)->flags)
-#define SetPageActive(page) cfs_set_bit(PG_active, &(page)->flags)
-#define ClearPageActive(page) cfs_clear_bit(PG_active, &(page)->flags)
-
-#define PageWriteback(page) cfs_test_bit(PG_writeback, &(page)->flags)
-#define TestSetPageWriteback(page) cfs_test_and_set_bit(PG_writeback, \
+#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
+#define PageDirty(page) test_bit(PG_dirty, &(page)->flags)
+#define SetPageDirty(page) set_bit(PG_dirty, &(page)->flags)
+#define ClearPageDirty(page) clear_bit(PG_dirty, &(page)->flags)
+#define PageLocked(page) test_bit(PG_locked, &(page)->flags)
+#define LockPage(page) set_bit(PG_locked, &(page)->flags)
+#define TryLockPage(page) test_and_set_bit(PG_locked, &(page)->flags)
+#define PageChecked(page) test_bit(PG_checked, &(page)->flags)
+#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
+#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)
+#define PageLaunder(page) test_bit(PG_launder, &(page)->flags)
+#define SetPageLaunder(page) set_bit(PG_launder, &(page)->flags)
+#define ClearPageLaunder(page) clear_bit(PG_launder, &(page)->flags)
+#define ClearPageArch1(page) clear_bit(PG_arch_1, &(page)->flags)
+
+#define PageError(page) test_bit(PG_error, &(page)->flags)
+#define SetPageError(page) set_bit(PG_error, &(page)->flags)
+#define ClearPageError(page) clear_bit(PG_error, &(page)->flags)
+#define PageReferenced(page) test_bit(PG_referenced, &(page)->flags)
+#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
+#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)
+
+#define PageActive(page) test_bit(PG_active, &(page)->flags)
+#define SetPageActive(page) set_bit(PG_active, &(page)->flags)
+#define ClearPageActive(page) clear_bit(PG_active, &(page)->flags)
+
+#define PageWriteback(page) test_bit(PG_writeback, &(page)->flags)
+#define TestSetPageWriteback(page) test_and_set_bit(PG_writeback, \
&(page)->flags)
-#define TestClearPageWriteback(page) cfs_test_and_clear_bit(PG_writeback, \
+#define TestClearPageWriteback(page) test_and_clear_bit(PG_writeback, \
&(page)->flags)
#define __GFP_FS (1)
size_t count;
loff_t index;
u32 version;
- cfs_mutex_t lock;
+ struct mutex lock;
const struct seq_operations *op;
void *private;
};
#define CFS_WAITLINK_MAGIC 'CWLM'
typedef struct cfs_waitq {
-	unsigned int            magic;
-	unsigned int            flags;
-
-	cfs_spinlock_t          guard;
-	cfs_list_t              waiters;
+	unsigned int		magic;
+	unsigned int		flags;
+	spinlock_t		guard;
+	cfs_list_t		waiters;
} cfs_waitq_t;
#define TASKSLT_MAGIC 'TSLT' /* Task Slot */
typedef struct _TASK_MAN {
-	ULONG           Magic;		/* Magic and Flags */
-	ULONG           Flags;
-
-	cfs_spinlock_t  Lock;		/* Protection lock */
-
-	cfs_mem_cache_t *slab;		/* Memory slab for task slot */
-	ULONG           NumOfTasks;	/* Total tasks (threads) */
-	LIST_ENTRY      TaskList;	/* List of task slots */
+	ULONG		Magic;		/* Magic and Flags */
+	ULONG		Flags;
+	spinlock_t	Lock;		/* Protection lock */
+	cfs_mem_cache_t	*slab;		/* Memory slab for task slot */
+	ULONG		NumOfTasks;	/* Total tasks (threads) */
+	LIST_ENTRY	TaskList;	/* List of task slots */
} TASK_MAN, *PTASK_MAN;
typedef struct _TASK_SLOT {
} KS_TSDU_MDL, *PKS_TSDU_MDL;
typedef struct ks_engine_mgr {
- cfs_spinlock_t lock;
- int stop;
- event_t exit;
- event_t start;
- cfs_list_t list;
+ spinlock_t lock;
+ int stop;
+ event_t exit;
+ event_t start;
+ cfs_list_t list;
} ks_engine_mgr_t;
typedef struct ks_engine_slot {
} ks_engine_slot_t;
typedef struct _KS_TSDUMGR {
- cfs_list_t TsduList;
- ULONG NumOfTsdu;
- ULONG TotalBytes;
- KEVENT Event;
- cfs_spinlock_t Lock;
- ks_engine_slot_t Slot;
- ULONG Payload;
- int Busy:1;
- int OOB:1;
+ cfs_list_t TsduList;
+ ULONG NumOfTsdu;
+ ULONG TotalBytes;
+ KEVENT Event;
+ spinlock_t Lock;
+ ks_engine_slot_t Slot;
+ ULONG Payload;
+ int Busy:1;
+ int OOB:1;
} KS_TSDUMGR, *PKS_TSDUMGR;
-#define ks_lock_tsdumgr(mgr) cfs_spin_lock(&((mgr)->Lock))
-#define ks_unlock_tsdumgr(mgr) cfs_spin_unlock(&((mgr)->Lock))
+#define ks_lock_tsdumgr(mgr) spin_lock(&((mgr)->Lock))
+#define ks_unlock_tsdumgr(mgr) spin_unlock(&((mgr)->Lock))
typedef struct _KS_CHAIN {
KS_TSDUMGR Normal; /* normal queue */
ulong kstc_magic; /* Magic & Flags */
ulong kstc_flags;
- cfs_spinlock_t kstc_lock; /* serialise lock*/
+ spinlock_t kstc_lock; /* serialise lock*/
void * kstc_conn; /* ks_conn_t */
ks_tconn_type_t kstc_type; /* tdi connection Type */
} ks_addr_slot_t;
typedef struct {
-	/*
-	 * Tdi client information
-	 */
-	UNICODE_STRING    ksnd_client_name; /* tdi client module name */
-	HANDLE            ksnd_pnp_handle;  /* the handle for pnp changes */
-
-	cfs_spinlock_t    ksnd_addrs_lock;  /* serialize ip address list access */
+	/*
+	 * Tdi client information
+	 */
+	UNICODE_STRING		ksnd_client_name;   /* tdi client module name */
+	HANDLE			ksnd_pnp_handle;    /* the handle for pnp changes */
+	spinlock_t		ksnd_addrs_lock;    /* serialize ip address list */
LIST_ENTRY ksnd_addrs_list; /* list of the ip addresses */
int ksnd_naddrs; /* number of the ip addresses */
TDI_PROVIDER_INFO ksnd_provider; /* tdi tcp/ip provider's information */
-	cfs_spinlock_t    ksnd_tconn_lock;  /* tdi connections access serialise */
-	int               ksnd_ntconns;     /* number of tconns attached in list */
-	cfs_list_t        ksnd_tconns;      /* tdi connections list */
-	cfs_mem_cache_t * ksnd_tconn_slab;  /* slabs for ks_tconn_t allocations */
-	event_t           ksnd_tconn_exit;  /* exit event to be signaled by the last tconn */
-	cfs_spinlock_t    ksnd_tsdu_lock;   /* tsdu access serialise */
-
+	spinlock_t		ksnd_tconn_lock;  /* tdi connections access lock */
+
+	int			ksnd_ntconns;	  /* number of tconns in list */
+	cfs_list_t		ksnd_tconns;	  /* tdi connections list */
+	cfs_mem_cache_t		*ksnd_tconn_slab; /* ks_tconn_t allocation slabs */
+	event_t			ksnd_tconn_exit;  /* event signaled by last tconn */
+	spinlock_t		ksnd_tsdu_lock;	  /* tsdu access serialise */
int ksnd_ntsdus; /* number of tsdu buffers allocated */
ulong ksnd_tsdu_size; /* the size of a signel tsdu buffer */
cfs_mem_cache_t *ksnd_tsdu_slab; /* slab cache for tsdu buffer allocation */
};
static struct cfs_zone_nob cfs_zone_nob;
-static spinlock_t cfs_zone_guard;
+static spinlock_t cfs_zone_guard;
cfs_mem_cache_t *mem_cache_find(const char *name, size_t objsize)
{
- cfs_mem_cache_t *walker = NULL;
+ cfs_mem_cache_t *walker = NULL;
- LASSERT(cfs_zone_nob.z_nob != NULL);
+ LASSERT(cfs_zone_nob.z_nob != NULL);
- spin_lock(&cfs_zone_guard);
- list_for_each_entry(walker, cfs_zone_nob.z_nob, mc_link) {
- if (!strcmp(walker->mc_name, name) && \
- walker->mc_size == objsize)
- break;
- }
- spin_unlock(&cfs_zone_guard);
+ spin_lock(&cfs_zone_guard);
+ list_for_each_entry(walker, cfs_zone_nob.z_nob, mc_link) {
+ if (!strcmp(walker->mc_name, name) && \
+ walker->mc_size == objsize)
+ break;
+ }
+ spin_unlock(&cfs_zone_guard);
- return walker;
+ return walker;
}
/*
void raw_page_death_row_clean(void)
{
- struct xnu_raw_page *pg;
+ struct xnu_raw_page *pg;
- spin_lock(&page_death_row_phylax);
- while (!list_empty(&page_death_row)) {
- pg = container_of(page_death_row.next,
- struct xnu_raw_page, link);
- list_del(&pg->link);
- spin_unlock(&page_death_row_phylax);
- raw_page_finish(pg);
- spin_lock(&page_death_row_phylax);
- }
- spin_unlock(&page_death_row_phylax);
+ spin_lock(&page_death_row_phylax);
+ while (!list_empty(&page_death_row)) {
+ pg = container_of(page_death_row.next,
+ struct xnu_raw_page, link);
+ list_del(&pg->link);
+ spin_unlock(&page_death_row_phylax);
+ raw_page_finish(pg);
+ spin_lock(&page_death_row_phylax);
+ }
+ spin_unlock(&page_death_row_phylax);
}
/* Free a "page" */
{
if (!atomic_dec_and_test(&pg->count))
return;
- /*
- * kmem_free()->vm_map_remove()->vm_map_delete()->lock_write() may
- * block. (raw_page_done()->upl_abort() can block too) On the other
- * hand, cfs_free_page() may be called in non-blockable context. To
- * work around this, park pages on global list when cannot block.
- */
- if (get_preemption_level() > 0) {
- spin_lock(&page_death_row_phylax);
- list_add(&pg->link, &page_death_row);
- spin_unlock(&page_death_row_phylax);
- } else {
- raw_page_finish(pg);
- raw_page_death_row_clean();
- }
+ /*
+ * kmem_free()->vm_map_remove()->vm_map_delete()->lock_write() may
+ * block. (raw_page_done()->upl_abort() can block too) On the other
+ * hand, cfs_free_page() may be called in non-blockable context. To
+ * work around this, park pages on global list when cannot block.
+ */
+ if (get_preemption_level() > 0) {
+ spin_lock(&page_death_row_phylax);
+ list_add(&pg->link, &page_death_row);
+ spin_unlock(&page_death_row_phylax);
+ } else {
+ raw_page_finish(pg);
+ raw_page_death_row_clean();
+ }
}
cfs_page_t *cfs_alloc_page(u_int32_t flags)
cfs_zone_nob.z_nob = nob->z_nob;
}
- spin_lock_init(&cfs_zone_guard);
+ spin_lock_init(&cfs_zone_guard);
#endif
- CFS_INIT_LIST_HEAD(&page_death_row);
- spin_lock_init(&page_death_row_phylax);
- raw_page_cache = cfs_mem_cache_create("raw-page", CFS_PAGE_SIZE, 0, 0);
- return 0;
+ CFS_INIT_LIST_HEAD(&page_death_row);
+ spin_lock_init(&page_death_row_phylax);
+ raw_page_cache = cfs_mem_cache_create("raw-page", CFS_PAGE_SIZE, 0, 0);
+ return 0;
}
void cfs_mem_fini(void)
{
- raw_page_death_row_clean();
- spin_lock_done(&page_death_row_phylax);
- cfs_mem_cache_destroy(raw_page_cache);
+ raw_page_death_row_clean();
+ spin_lock_done(&page_death_row_phylax);
+ cfs_mem_cache_destroy(raw_page_cache);
-#if CFS_INDIVIDUAL_ZONE
- cfs_zone_nob.z_nob = NULL;
- spin_lock_done(&cfs_zone_guard);
+#if CFS_INDIVIDUAL_ZONE
+ cfs_zone_nob.z_nob = NULL;
+ spin_lock_done(&cfs_zone_guard);
#endif
}
int count = cfs_atomic_inc_return(&cfs_fail_count);
if (count >= cfs_fail_val) {
- cfs_set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
+ set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
cfs_atomic_set(&cfs_fail_count, 0);
/* we are lost race to increase */
if (count > cfs_fail_val)
if ((set == CFS_FAIL_LOC_ORSET || set == CFS_FAIL_LOC_RESET) &&
(value & CFS_FAIL_ONCE))
- cfs_set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
+ set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
/* Lost race to set CFS_FAILED_BIT. */
- if (cfs_test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) {
+ if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) {
/* If CFS_FAIL_ONCE is valid, only one process can fail,
* otherwise multi-process can fail at the same time. */
if (cfs_fail_loc & CFS_FAIL_ONCE)
static inline void
cfs_hash_spin_lock(cfs_hash_lock_t *lock, int exclusive)
{
- cfs_spin_lock(&lock->spin);
+ spin_lock(&lock->spin);
}
static inline void
cfs_hash_spin_unlock(cfs_hash_lock_t *lock, int exclusive)
{
- cfs_spin_unlock(&lock->spin);
+ spin_unlock(&lock->spin);
}
static inline void
cfs_hash_rw_lock(cfs_hash_lock_t *lock, int exclusive)
{
- if (!exclusive)
- cfs_read_lock(&lock->rw);
- else
- cfs_write_lock(&lock->rw);
+ if (!exclusive)
+ read_lock(&lock->rw);
+ else
+ write_lock(&lock->rw);
}
static inline void
cfs_hash_rw_unlock(cfs_hash_lock_t *lock, int exclusive)
{
- if (!exclusive)
- cfs_read_unlock(&lock->rw);
- else
- cfs_write_unlock(&lock->rw);
+ if (!exclusive)
+ read_unlock(&lock->rw);
+ else
+ write_unlock(&lock->rw);
}
/** No lock hash */
static void
cfs_hash_lock_setup(cfs_hash_t *hs)
{
- if (cfs_hash_with_no_lock(hs)) {
- hs->hs_lops = &cfs_hash_nl_lops;
+ if (cfs_hash_with_no_lock(hs)) {
+ hs->hs_lops = &cfs_hash_nl_lops;
- } else if (cfs_hash_with_no_bktlock(hs)) {
- hs->hs_lops = &cfs_hash_nbl_lops;
- cfs_spin_lock_init(&hs->hs_lock.spin);
+ } else if (cfs_hash_with_no_bktlock(hs)) {
+ hs->hs_lops = &cfs_hash_nbl_lops;
+ spin_lock_init(&hs->hs_lock.spin);
- } else if (cfs_hash_with_rehash(hs)) {
- cfs_rwlock_init(&hs->hs_lock.rw);
+ } else if (cfs_hash_with_rehash(hs)) {
+ rwlock_init(&hs->hs_lock.rw);
if (cfs_hash_with_rw_bktlock(hs))
hs->hs_lops = &cfs_hash_bkt_rw_lops;
max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
return;
- cfs_spin_lock(&hs->hs_dep_lock);
- hs->hs_dep_max = dep_cur;
- hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
- hs->hs_dep_off = bd->bd_offset;
- hs->hs_dep_bits = hs->hs_cur_bits;
- cfs_spin_unlock(&hs->hs_dep_lock);
+ spin_lock(&hs->hs_dep_lock);
+ hs->hs_dep_max = dep_cur;
+ hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
+ hs->hs_dep_off = bd->bd_offset;
+ hs->hs_dep_bits = hs->hs_cur_bits;
+ spin_unlock(&hs->hs_dep_lock);
cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
# endif
cfs_hash_with_no_bktlock(hs))
continue;
- if (cfs_hash_with_rw_bktlock(hs))
- cfs_rwlock_init(&new_bkts[i]->hsb_lock.rw);
- else if (cfs_hash_with_spin_bktlock(hs))
- cfs_spin_lock_init(&new_bkts[i]->hsb_lock.spin);
- else
- LBUG(); /* invalid use-case */
- }
- return new_bkts;
+ if (cfs_hash_with_rw_bktlock(hs))
+ rwlock_init(&new_bkts[i]->hsb_lock.rw);
+ else if (cfs_hash_with_spin_bktlock(hs))
+ spin_lock_init(&new_bkts[i]->hsb_lock.spin);
+ else
+ LBUG(); /* invalid use-case */
+ }
+ return new_bkts;
}
/**
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static int cfs_hash_dep_print(cfs_workitem_t *wi)
{
- cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
- int dep;
- int bkt;
- int off;
- int bits;
-
- cfs_spin_lock(&hs->hs_dep_lock);
- dep = hs->hs_dep_max;
- bkt = hs->hs_dep_bkt;
- off = hs->hs_dep_off;
- bits = hs->hs_dep_bits;
- cfs_spin_unlock(&hs->hs_dep_lock);
-
- LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
- hs->hs_name, bits, dep, bkt, off);
- cfs_spin_lock(&hs->hs_dep_lock);
- hs->hs_dep_bits = 0; /* mark as workitem done */
- cfs_spin_unlock(&hs->hs_dep_lock);
- return 0;
+ cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
+ int dep;
+ int bkt;
+ int off;
+ int bits;
+
+ spin_lock(&hs->hs_dep_lock);
+ dep = hs->hs_dep_max;
+ bkt = hs->hs_dep_bkt;
+ off = hs->hs_dep_off;
+ bits = hs->hs_dep_bits;
+ spin_unlock(&hs->hs_dep_lock);
+
+ LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
+ hs->hs_name, bits, dep, bkt, off);
+ spin_lock(&hs->hs_dep_lock);
+ hs->hs_dep_bits = 0; /* mark as workitem done */
+ spin_unlock(&hs->hs_dep_lock);
+ return 0;
}
static void cfs_hash_depth_wi_init(cfs_hash_t *hs)
{
- cfs_spin_lock_init(&hs->hs_dep_lock);
+ spin_lock_init(&hs->hs_dep_lock);
cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
}
static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
{
if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
- return;
+ return;
- cfs_spin_lock(&hs->hs_dep_lock);
- while (hs->hs_dep_bits != 0) {
- cfs_spin_unlock(&hs->hs_dep_lock);
- cfs_cond_resched();
- cfs_spin_lock(&hs->hs_dep_lock);
- }
- cfs_spin_unlock(&hs->hs_dep_lock);
+ spin_lock(&hs->hs_dep_lock);
+ while (hs->hs_dep_bits != 0) {
+ spin_unlock(&hs->hs_dep_lock);
+ cfs_cond_resched();
+ spin_lock(&hs->hs_dep_lock);
+ }
+ spin_unlock(&hs->hs_dep_lock);
}
#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
if (maxdep < bd.bd_bucket->hsb_depmax) {
maxdep = bd.bd_bucket->hsb_depmax;
#ifdef __KERNEL__
- maxdepb = cfs_ffz(~maxdep);
+ maxdepb = ffz(~maxdep);
#endif
}
total += bd.bd_bucket->hsb_count;
};
static cfs_list_t kkuc_groups[KUC_GRP_MAX+1] = {};
/* Protect message sending against remove and adds */
-static CFS_DECLARE_RWSEM(kg_sem);
+static DECLARE_RWSEM(kg_sem);
/** Add a receiver to a broadcast group
* @param filp pipe to write into
reg->kr_uid = uid;
reg->kr_data = data;
- cfs_down_write(&kg_sem);
+ down_write(&kg_sem);
if (kkuc_groups[group].next == NULL)
CFS_INIT_LIST_HEAD(&kkuc_groups[group]);
	cfs_list_add(&reg->kr_chain, &kkuc_groups[group]);
- cfs_up_write(&kg_sem);
+ up_write(&kg_sem);
CDEBUG(D_KUC, "Added uid=%d fp=%p to group %d\n", uid, filp, group);
libcfs_kkuc_group_put(group, &lh);
}
- cfs_down_write(&kg_sem);
+ down_write(&kg_sem);
cfs_list_for_each_entry_safe(reg, next, &kkuc_groups[group], kr_chain) {
if ((uid == 0) || (uid == reg->kr_uid)) {
			cfs_list_del(&reg->kr_chain);
cfs_free(reg);
}
}
- cfs_up_write(&kg_sem);
+ up_write(&kg_sem);
RETURN(0);
}
int rc = 0;
ENTRY;
- cfs_down_read(&kg_sem);
+ down_read(&kg_sem);
cfs_list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
if (reg->kr_fp != NULL) {
rc = libcfs_kkuc_msg_put(reg->kr_fp, payload);
}
}
}
- cfs_up_read(&kg_sem);
+ up_read(&kg_sem);
RETURN(rc);
}
if (kkuc_groups[group].next == NULL)
RETURN(0);
- cfs_down_read(&kg_sem);
+ down_read(&kg_sem);
cfs_list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
if (reg->kr_fp != NULL) {
rc = cb_func(reg->kr_data, cb_arg);
}
}
- cfs_up_read(&kg_sem);
+ up_read(&kg_sem);
RETURN(rc);
}
cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
{
struct cfs_percpt_lock *pcl;
- cfs_spinlock_t *lock;
+ spinlock_t *lock;
int i;
/* NB: cptab can be NULL, pcl will be for HW CPUs on that case */
}
cfs_percpt_for_each(lock, i, pcl->pcl_locks)
- cfs_spin_lock_init(lock);
+ spin_lock_init(lock);
return pcl;
}
}
if (likely(index != CFS_PERCPT_LOCK_EX)) {
- cfs_spin_lock(pcl->pcl_locks[index]);
+ spin_lock(pcl->pcl_locks[index]);
return;
}
/* exclusive lock request */
for (i = 0; i < ncpt; i++) {
- cfs_spin_lock(pcl->pcl_locks[i]);
+ spin_lock(pcl->pcl_locks[i]);
if (i == 0) {
LASSERT(!pcl->pcl_locked);
/* nobody should take private lock after this
index = ncpt == 1 ? 0 : index;
if (likely(index != CFS_PERCPT_LOCK_EX)) {
- cfs_spin_unlock(pcl->pcl_locks[index]);
+ spin_unlock(pcl->pcl_locks[index]);
return;
}
LASSERT(pcl->pcl_locked);
pcl->pcl_locked = 0;
}
- cfs_spin_unlock(pcl->pcl_locks[i]);
+ spin_unlock(pcl->pcl_locks[i]);
}
}
CFS_EXPORT_SYMBOL(cfs_percpt_unlock);
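/*
 * Usage sketch (editor's illustration, not part of the patch): callers take
 * one partition's spinlock for the common case and pass CFS_PERCPT_LOCK_EX to
 * lock every partition for rare global updates.  The cfs_percpt_lock() entry
 * point matching the unlock above is assumed; foo_global_update() and its
 * arguments are hypothetical.
 */
static void foo_global_update(struct cfs_percpt_lock *pcl, int cpt)
{
	cfs_percpt_lock(pcl, cpt);			/* one private lock */
	/* ... touch this partition's state ... */
	cfs_percpt_unlock(pcl, cpt);

	cfs_percpt_lock(pcl, CFS_PERCPT_LOCK_EX);	/* all locks, in order */
	/* ... touch state shared by all partitions ... */
	cfs_percpt_unlock(pcl, CFS_PERCPT_LOCK_EX);
}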
int oom_get_adj(struct task_struct *task, int scope)
{
-
- int oom_adj;
+ int oom_adj;
#ifdef HAVE_OOMADJ_IN_SIG
- unsigned long flags;
+ unsigned long flags;
- spin_lock_irqsave(&task->sighand->siglock, flags);
- oom_adj = task->signal->oom_adj;
- task->signal->oom_adj = scope;
- spin_unlock_irqrestore(&task->sighand->siglock, flags);
+ spin_lock_irqsave(&task->sighand->siglock, flags);
+ oom_adj = task->signal->oom_adj;
+ task->signal->oom_adj = scope;
+ spin_unlock_irqrestore(&task->sighand->siglock, flags);
#else
- oom_adj = task->oomkilladj;
- task->oomkilladj = scope;
+ oom_adj = task->oomkilladj;
+ task->oomkilladj = scope;
#endif
- return oom_adj;
+ return oom_adj;
}
int cfs_create_thread(int (*fn)(void *),
void
cfs_waitq_add_exclusive_head(cfs_waitq_t *waitq, cfs_waitlink_t *link)
{
- unsigned long flags;
+ unsigned long flags;
- spin_lock_irqsave(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
- __add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
- spin_unlock_irqrestore(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
+ spin_lock_irqsave(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
+ __add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
+ spin_unlock_irqrestore(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
}
EXPORT_SYMBOL(cfs_waitq_add_exclusive_head);
char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
-cfs_rw_semaphore_t cfs_tracefile_sem;
+struct rw_semaphore cfs_tracefile_sem;
int cfs_tracefile_init_arch()
{
int j;
struct cfs_trace_cpu_data *tcd;
- cfs_init_rwsem(&cfs_tracefile_sem);
+ init_rwsem(&cfs_tracefile_sem);
/* initialize trace_data */
memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
/* arch related info initialized */
cfs_tcd_for_each(tcd, i, j) {
- cfs_spin_lock_init(&tcd->tcd_lock);
+ spin_lock_init(&tcd->tcd_lock);
tcd->tcd_pages_factor = pages_factor[i];
tcd->tcd_type = i;
tcd->tcd_cpu = j;
cfs_trace_data[i] = NULL;
}
- cfs_fini_rwsem(&cfs_tracefile_sem);
+ fini_rwsem(&cfs_tracefile_sem);
}
void cfs_tracefile_read_lock()
{
- cfs_down_read(&cfs_tracefile_sem);
+ down_read(&cfs_tracefile_sem);
}
void cfs_tracefile_read_unlock()
{
- cfs_up_read(&cfs_tracefile_sem);
+ up_read(&cfs_tracefile_sem);
}
void cfs_tracefile_write_lock()
{
- cfs_down_write(&cfs_tracefile_sem);
+ down_write(&cfs_tracefile_sem);
}
void cfs_tracefile_write_unlock()
{
- cfs_up_write(&cfs_tracefile_sem);
+ up_write(&cfs_tracefile_sem);
}
cfs_trace_buf_type_t cfs_trace_buf_idx_get()
int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
{
__LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
- if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
- cfs_spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
- else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
- cfs_spin_lock_bh(&tcd->tcd_lock);
- else if (unlikely(walking))
- cfs_spin_lock_irq(&tcd->tcd_lock);
- else
- cfs_spin_lock(&tcd->tcd_lock);
+ if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
+ spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
+ else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
+ spin_lock_bh(&tcd->tcd_lock);
+ else if (unlikely(walking))
+ spin_lock_irq(&tcd->tcd_lock);
+ else
+ spin_lock(&tcd->tcd_lock);
return 1;
}
void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
{
__LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
- if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
- cfs_spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
- else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
- cfs_spin_unlock_bh(&tcd->tcd_lock);
- else if (unlikely(walking))
- cfs_spin_unlock_irq(&tcd->tcd_lock);
- else
- cfs_spin_unlock(&tcd->tcd_lock);
+ if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
+ spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
+ else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
+ spin_unlock_bh(&tcd->tcd_lock);
+ else if (unlikely(walking))
+ spin_unlock_irq(&tcd->tcd_lock);
+ else
+ spin_unlock(&tcd->tcd_lock);
}
int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
RETURN(0);
}
-static cfs_rw_semaphore_t ioctl_list_sem;
+static struct rw_semaphore ioctl_list_sem;
static cfs_list_t ioctl_list;
int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand)
{
int rc = 0;
- cfs_down_write(&ioctl_list_sem);
+ down_write(&ioctl_list_sem);
if (!cfs_list_empty(&hand->item))
rc = -EBUSY;
else
cfs_list_add_tail(&hand->item, &ioctl_list);
- cfs_up_write(&ioctl_list_sem);
+ up_write(&ioctl_list_sem);
return rc;
}
{
int rc = 0;
- cfs_down_write(&ioctl_list_sem);
+ down_write(&ioctl_list_sem);
if (cfs_list_empty(&hand->item))
rc = -ENOENT;
else
cfs_list_del_init(&hand->item);
- cfs_up_write(&ioctl_list_sem);
+ up_write(&ioctl_list_sem);
return rc;
}
default: {
struct libcfs_ioctl_handler *hand;
err = -EINVAL;
- cfs_down_read(&ioctl_list_sem);
+ down_read(&ioctl_list_sem);
cfs_list_for_each_entry_typed(hand, &ioctl_list,
struct libcfs_ioctl_handler, item) {
err = hand->handle_ioctl(cmd, data);
break;
}
}
- cfs_up_read(&ioctl_list_sem);
+ up_read(&ioctl_list_sem);
break;
}
}
MODULE_LICENSE("GPL");
extern cfs_psdev_t libcfs_dev;
-extern cfs_rw_semaphore_t cfs_tracefile_sem;
-extern cfs_mutex_t cfs_trace_thread_mutex;
+extern struct rw_semaphore cfs_tracefile_sem;
+extern struct mutex cfs_trace_thread_mutex;
extern struct cfs_wi_sched *cfs_sched_rehash;
extern void libcfs_init_nidstrings(void);
libcfs_arch_init();
libcfs_init_nidstrings();
- cfs_init_rwsem(&cfs_tracefile_sem);
- cfs_mutex_init(&cfs_trace_thread_mutex);
- cfs_init_rwsem(&ioctl_list_sem);
+ init_rwsem(&cfs_tracefile_sem);
+ mutex_init(&cfs_trace_thread_mutex);
+ init_rwsem(&ioctl_list_sem);
CFS_INIT_LIST_HEAD(&ioctl_list);
cfs_waitq_init(&cfs_race_waitq);
printk(CFS_KERN_ERR "LustreError: libcfs_debug_cleanup: %d\n",
rc);
- cfs_fini_rwsem(&ioctl_list_sem);
- cfs_fini_rwsem(&cfs_tracefile_sem);
+ fini_rwsem(&ioctl_list_sem);
+ fini_rwsem(&cfs_tracefile_sem);
libcfs_arch_cleanup();
}
static int libcfs_nidstring_idx = 0;
#ifdef __KERNEL__
-static cfs_spinlock_t libcfs_nidstring_lock;
+static spinlock_t libcfs_nidstring_lock;
void libcfs_init_nidstrings (void)
{
- cfs_spin_lock_init(&libcfs_nidstring_lock);
+ spin_lock_init(&libcfs_nidstring_lock);
}
-# define NIDSTR_LOCK(f) cfs_spin_lock_irqsave(&libcfs_nidstring_lock, f)
-# define NIDSTR_UNLOCK(f) cfs_spin_unlock_irqrestore(&libcfs_nidstring_lock, f)
+# define NIDSTR_LOCK(f) spin_lock_irqsave(&libcfs_nidstring_lock, f)
+# define NIDSTR_UNLOCK(f) spin_unlock_irqrestore(&libcfs_nidstring_lock, f)
#else
# define NIDSTR_LOCK(f) (f=sizeof(f)) /* avoid set-but-unused warnings */
# define NIDSTR_UNLOCK(f) (f=sizeof(f))
char cfs_tracefile[TRACEFILE_NAME_SIZE];
long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
static struct tracefiled_ctl trace_tctl;
-cfs_mutex_t cfs_trace_thread_mutex;
+struct mutex cfs_trace_thread_mutex;
static int thread_running = 0;
cfs_atomic_t cfs_tage_allocated = CFS_ATOMIC_INIT(0);
pgcount + 1, tcd->tcd_cur_pages);
CFS_INIT_LIST_HEAD(&pc.pc_pages);
- cfs_spin_lock_init(&pc.pc_lock);
+ spin_lock_init(&pc.pc_lock);
cfs_list_for_each_entry_safe_typed(tage, tmp, &tcd->tcd_pages,
struct cfs_trace_page, linkage) {
static void collect_pages_on_all_cpus(struct page_collection *pc)
{
- struct cfs_trace_cpu_data *tcd;
- int i, cpu;
+ struct cfs_trace_cpu_data *tcd;
+ int i, cpu;
- cfs_spin_lock(&pc->pc_lock);
+ spin_lock(&pc->pc_lock);
cfs_for_each_possible_cpu(cpu) {
cfs_tcd_for_each_type_lock(tcd, i, cpu) {
cfs_list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
}
}
}
- cfs_spin_unlock(&pc->pc_lock);
+ spin_unlock(&pc->pc_lock);
}
static void collect_pages(struct page_collection *pc)
struct cfs_trace_page *tmp;
int i, cpu;
- cfs_spin_lock(&pc->pc_lock);
+ spin_lock(&pc->pc_lock);
cfs_for_each_possible_cpu(cpu) {
cfs_tcd_for_each_type_lock(tcd, i, cpu) {
cur_head = tcd->tcd_pages.next;
}
}
}
- cfs_spin_unlock(&pc->pc_lock);
+ spin_unlock(&pc->pc_lock);
}
static void put_pages_back(struct page_collection *pc)
* if we have been steadily writing (and otherwise discarding) pages via the
* debug daemon. */
static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
- struct cfs_trace_cpu_data *tcd)
+ struct cfs_trace_cpu_data *tcd)
{
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
- cfs_spin_lock(&pc->pc_lock);
+ spin_lock(&pc->pc_lock);
cfs_list_for_each_entry_safe_typed(tage, tmp, &pc->pc_pages,
struct cfs_trace_page, linkage) {
tcd->tcd_cur_daemon_pages--;
}
}
- cfs_spin_unlock(&pc->pc_lock);
+ spin_unlock(&pc->pc_lock);
}
static void put_pages_on_daemon_list(struct page_collection *pc)
void cfs_trace_debug_print(void)
{
- struct page_collection pc;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
+ struct page_collection pc;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
- cfs_spin_lock_init(&pc.pc_lock);
+ spin_lock_init(&pc.pc_lock);
pc.pc_want_daemon_pages = 1;
collect_pages(&pc);
goto out;
}
- cfs_spin_lock_init(&pc.pc_lock);
+ spin_lock_init(&pc.pc_lock);
pc.pc_want_daemon_pages = 1;
collect_pages(&pc);
if (cfs_list_empty(&pc.pc_pages)) {
void cfs_trace_flush_pages(void)
{
- struct page_collection pc;
- struct cfs_trace_page *tage;
- struct cfs_trace_page *tmp;
+ struct page_collection pc;
+ struct cfs_trace_page *tage;
+ struct cfs_trace_page *tmp;
- cfs_spin_lock_init(&pc.pc_lock);
+ spin_lock_init(&pc.pc_lock);
pc.pc_want_daemon_pages = 1;
collect_pages(&pc);
/* this is so broken in uml? what on earth is going on? */
cfs_daemonize("ktracefiled");
- cfs_spin_lock_init(&pc.pc_lock);
- cfs_complete(&tctl->tctl_start);
+ spin_lock_init(&pc.pc_lock);
+ complete(&tctl->tctl_start);
while (1) {
cfs_waitlink_t __wait;
cfs_time_seconds(1));
cfs_waitq_del(&tctl->tctl_waitq, &__wait);
}
- cfs_complete(&tctl->tctl_stop);
+ complete(&tctl->tctl_stop);
return 0;
}
struct tracefiled_ctl *tctl = &trace_tctl;
int rc = 0;
- cfs_mutex_lock(&cfs_trace_thread_mutex);
+ mutex_lock(&cfs_trace_thread_mutex);
if (thread_running)
goto out;
- cfs_init_completion(&tctl->tctl_start);
- cfs_init_completion(&tctl->tctl_stop);
+ init_completion(&tctl->tctl_start);
+ init_completion(&tctl->tctl_stop);
cfs_waitq_init(&tctl->tctl_waitq);
cfs_atomic_set(&tctl->tctl_shutdown, 0);
goto out;
}
- cfs_wait_for_completion(&tctl->tctl_start);
+ wait_for_completion(&tctl->tctl_start);
thread_running = 1;
out:
- cfs_mutex_unlock(&cfs_trace_thread_mutex);
+ mutex_unlock(&cfs_trace_thread_mutex);
return rc;
}
{
struct tracefiled_ctl *tctl = &trace_tctl;
- cfs_mutex_lock(&cfs_trace_thread_mutex);
+ mutex_lock(&cfs_trace_thread_mutex);
if (thread_running) {
printk(CFS_KERN_INFO
"Lustre: shutting down debug daemon thread...\n");
cfs_atomic_set(&tctl->tctl_shutdown, 1);
- cfs_wait_for_completion(&tctl->tctl_stop);
+ wait_for_completion(&tctl->tctl_stop);
thread_running = 0;
}
- cfs_mutex_unlock(&cfs_trace_thread_mutex);
+ mutex_unlock(&cfs_trace_thread_mutex);
}
int cfs_tracefile_init(int max_pages)
static void cfs_trace_cleanup(void)
{
- struct page_collection pc;
+ struct page_collection pc;
- CFS_INIT_LIST_HEAD(&pc.pc_pages);
- cfs_spin_lock_init(&pc.pc_lock);
+ CFS_INIT_LIST_HEAD(&pc.pc_pages);
+ spin_lock_init(&pc.pc_lock);
- trace_cleanup_on_all_cpus();
+ trace_cleanup_on_all_cpus();
- cfs_tracefile_fini_arch();
+ cfs_tracefile_fini_arch();
}
void cfs_tracefile_exit(void)
* and trace_put_tcd, which are called in libcfs_debug_vmsg2 and
* tcd_for_each_type_lock
*/
- cfs_spinlock_t tcd_lock;
+ spinlock_t tcd_lock;
unsigned long tcd_lock_flags;
/*
/* XXX nikita: this declaration is internal to tracefile.c and should probably
* be moved there */
struct page_collection {
- cfs_list_t pc_pages;
+ cfs_list_t pc_pages;
/*
* spin-lock protecting ->pc_pages. It is taken by smp_call_function()
* call-back functions. XXX nikita: Which is horrible: all processors
* lock. Probably ->pc_pages should be replaced with an array of
* NR_CPUS elements accessed locklessly.
*/
- cfs_spinlock_t pc_lock;
+ spinlock_t pc_lock;
/*
* if this flag is set, collect_pages() will spill both
* ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
* only ->tcd_pages are spilled.
*/
- int pc_want_daemon_pages;
+ int pc_want_daemon_pages;
};
/* XXX nikita: this declaration is internal to tracefile.c and should probably
* be moved there */
struct tracefiled_ctl {
- cfs_completion_t tctl_start;
- cfs_completion_t tctl_stop;
- cfs_waitq_t tctl_waitq;
- pid_t tctl_pid;
- cfs_atomic_t tctl_shutdown;
+ struct completion tctl_start;
+ struct completion tctl_stop;
+ cfs_waitq_t tctl_waitq;
+ pid_t tctl_pid;
+ cfs_atomic_t tctl_shutdown;
};
/*
head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
find_again:
found = 0;
- cfs_spin_lock(&cache->uc_lock);
+ spin_lock(&cache->uc_lock);
cfs_list_for_each_entry_safe(entry, next, head, ue_hash) {
/* check invalid & expired items */
if (check_unlink_entry(cache, entry))
if (!found) {
if (!new) {
- cfs_spin_unlock(&cache->uc_lock);
+ spin_unlock(&cache->uc_lock);
new = alloc_entry(cache, key, args);
if (!new) {
CERROR("fail to alloc entry\n");
if (UC_CACHE_IS_NEW(entry)) {
UC_CACHE_SET_ACQUIRING(entry);
UC_CACHE_CLEAR_NEW(entry);
- cfs_spin_unlock(&cache->uc_lock);
- rc = refresh_entry(cache, entry);
- cfs_spin_lock(&cache->uc_lock);
+ spin_unlock(&cache->uc_lock);
+ rc = refresh_entry(cache, entry);
+ spin_lock(&cache->uc_lock);
entry->ue_acquire_expire =
cfs_time_shift(cache->uc_acquire_expire);
if (rc < 0) {
cfs_waitlink_init(&wait);
cfs_waitq_add(&entry->ue_waitq, &wait);
cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
- cfs_spin_unlock(&cache->uc_lock);
+ spin_unlock(&cache->uc_lock);
- left = cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
- expiry);
+ left = cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
+ expiry);
- cfs_spin_lock(&cache->uc_lock);
+ spin_lock(&cache->uc_lock);
cfs_waitq_del(&entry->ue_waitq, &wait);
if (UC_CACHE_IS_ACQUIRING(entry)) {
/* we're interrupted or upcall failed in the middle */
* without any error, should at least give a
* chance to use it once.
*/
- if (entry != new) {
- put_entry(cache, entry);
- cfs_spin_unlock(&cache->uc_lock);
- new = NULL;
- goto find_again;
- }
- }
+ if (entry != new) {
+ put_entry(cache, entry);
+ spin_unlock(&cache->uc_lock);
+ new = NULL;
+ goto find_again;
+ }
+ }
/* Now we know it's good */
out:
- cfs_spin_unlock(&cache->uc_lock);
- RETURN(entry);
+ spin_unlock(&cache->uc_lock);
+ RETURN(entry);
}
EXPORT_SYMBOL(upcall_cache_get_entry);
void upcall_cache_put_entry(struct upcall_cache *cache,
struct upcall_cache_entry *entry)
{
- ENTRY;
-
- if (!entry) {
- EXIT;
- return;
- }
-
- LASSERT(cfs_atomic_read(&entry->ue_refcount) > 0);
- cfs_spin_lock(&cache->uc_lock);
- put_entry(cache, entry);
- cfs_spin_unlock(&cache->uc_lock);
- EXIT;
+ ENTRY;
+
+ if (!entry) {
+ EXIT;
+ return;
+ }
+
+ LASSERT(cfs_atomic_read(&entry->ue_refcount) > 0);
+ spin_lock(&cache->uc_lock);
+ put_entry(cache, entry);
+ spin_unlock(&cache->uc_lock);
+ EXIT;
}
EXPORT_SYMBOL(upcall_cache_put_entry);
head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
- cfs_spin_lock(&cache->uc_lock);
+ spin_lock(&cache->uc_lock);
cfs_list_for_each_entry(entry, head, ue_hash) {
if (downcall_compare(cache, entry, key, args) == 0) {
found = 1;
CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n",
cache->uc_name, key);
/* haven't found, it's possible */
- cfs_spin_unlock(&cache->uc_lock);
+ spin_unlock(&cache->uc_lock);
RETURN(-EINVAL);
}
GOTO(out, rc = -EINVAL);
}
- cfs_spin_unlock(&cache->uc_lock);
- if (cache->uc_ops->parse_downcall)
- rc = cache->uc_ops->parse_downcall(cache, entry, args);
- cfs_spin_lock(&cache->uc_lock);
+ spin_unlock(&cache->uc_lock);
+ if (cache->uc_ops->parse_downcall)
+ rc = cache->uc_ops->parse_downcall(cache, entry, args);
+ spin_lock(&cache->uc_lock);
if (rc)
GOTO(out, rc);
cfs_list_del_init(&entry->ue_hash);
}
UC_CACHE_CLEAR_ACQUIRING(entry);
- cfs_spin_unlock(&cache->uc_lock);
- cfs_waitq_broadcast(&entry->ue_waitq);
- put_entry(cache, entry);
+ spin_unlock(&cache->uc_lock);
+ cfs_waitq_broadcast(&entry->ue_waitq);
+ put_entry(cache, entry);
- RETURN(rc);
+ RETURN(rc);
}
EXPORT_SYMBOL(upcall_cache_downcall);
static void cache_flush(struct upcall_cache *cache, int force)
{
- struct upcall_cache_entry *entry, *next;
- int i;
- ENTRY;
+ struct upcall_cache_entry *entry, *next;
+ int i;
+ ENTRY;
- cfs_spin_lock(&cache->uc_lock);
+ spin_lock(&cache->uc_lock);
for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
cfs_list_for_each_entry_safe(entry, next,
&cache->uc_hashtable[i], ue_hash) {
free_entry(cache, entry);
}
}
- cfs_spin_unlock(&cache->uc_lock);
- EXIT;
+ spin_unlock(&cache->uc_lock);
+ EXIT;
}
void upcall_cache_flush_idle(struct upcall_cache *cache)
head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
- cfs_spin_lock(&cache->uc_lock);
+ spin_lock(&cache->uc_lock);
cfs_list_for_each_entry(entry, head, ue_hash) {
if (upcall_compare(cache, entry, key, args) == 0) {
found = 1;
if (!cfs_atomic_read(&entry->ue_refcount))
free_entry(cache, entry);
}
- cfs_spin_unlock(&cache->uc_lock);
+ spin_unlock(&cache->uc_lock);
}
EXPORT_SYMBOL(upcall_cache_flush_one);
if (!cache)
RETURN(ERR_PTR(-ENOMEM));
- cfs_spin_lock_init(&cache->uc_lock);
- cfs_rwlock_init(&cache->uc_upcall_rwlock);
+ spin_lock_init(&cache->uc_lock);
+ rwlock_init(&cache->uc_upcall_rwlock);
for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
CFS_INIT_LIST_HEAD(&cache->uc_hashtable[i]);
strncpy(cache->uc_name, name, sizeof(cache->uc_name) - 1);
#define OFF_BY_START(start) ((start)/BITS_PER_LONG)
-unsigned long cfs_find_next_bit(unsigned long *addr,
+unsigned long find_next_bit(unsigned long *addr,
unsigned long size, unsigned long offset)
{
unsigned long *word, *last;
return base + bit;
}
-unsigned long cfs_find_next_zero_bit(unsigned long *addr,
+unsigned long find_next_zero_bit(unsigned long *addr,
unsigned long size, unsigned long offset)
{
unsigned long *word, *last;
* No-op implementation.
*/
-void cfs_spin_lock_init(cfs_spinlock_t *lock)
+void spin_lock_init(spinlock_t *lock)
{
- LASSERT(lock != NULL);
- (void)lock;
+ LASSERT(lock != NULL);
+ (void)lock;
}
-void cfs_spin_lock(cfs_spinlock_t *lock)
+void spin_lock(spinlock_t *lock)
{
- (void)lock;
+ (void)lock;
}
-void cfs_spin_unlock(cfs_spinlock_t *lock)
+void spin_unlock(spinlock_t *lock)
{
- (void)lock;
+ (void)lock;
}
-int cfs_spin_trylock(cfs_spinlock_t *lock)
+int spin_trylock(spinlock_t *lock)
{
- (void)lock;
+ (void)lock;
return 1;
}
-void cfs_spin_lock_bh_init(cfs_spinlock_t *lock)
+void spin_lock_bh_init(spinlock_t *lock)
{
- LASSERT(lock != NULL);
- (void)lock;
+ LASSERT(lock != NULL);
+ (void)lock;
}
-void cfs_spin_lock_bh(cfs_spinlock_t *lock)
+void spin_lock_bh(spinlock_t *lock)
{
- LASSERT(lock != NULL);
- (void)lock;
+ LASSERT(lock != NULL);
+ (void)lock;
}
-void cfs_spin_unlock_bh(cfs_spinlock_t *lock)
+void spin_unlock_bh(spinlock_t *lock)
{
- LASSERT(lock != NULL);
- (void)lock;
+ LASSERT(lock != NULL);
+ (void)lock;
}
/*
* - __up(x)
*/
-void cfs_sema_init(cfs_semaphore_t *s, int val)
+void sema_init(struct semaphore *s, int val)
{
- LASSERT(s != NULL);
- (void)s;
- (void)val;
+ LASSERT(s != NULL);
+ (void)s;
+ (void)val;
}
-void __down(cfs_semaphore_t *s)
+void __down(struct semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
+ LASSERT(s != NULL);
+ (void)s;
}
-int __down_interruptible(cfs_semaphore_t *s)
+int __down_interruptible(struct semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
- return 0;
+ LASSERT(s != NULL);
+ (void)s;
+ return 0;
}
-void __up(cfs_semaphore_t *s)
+void __up(struct semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
+ LASSERT(s != NULL);
+ (void)s;
}
* - wait_for_completion(c)
*/
-static cfs_wait_handler_t wait_handler;
+static wait_handler_t wait_handler;
-void cfs_init_completion_module(cfs_wait_handler_t handler)
+void init_completion_module(wait_handler_t handler)
{
- wait_handler = handler;
+ wait_handler = handler;
}
-int cfs_call_wait_handler(int timeout)
+int call_wait_handler(int timeout)
{
- if (!wait_handler)
- return -ENOSYS;
- return wait_handler(timeout);
+ if (!wait_handler)
+ return -ENOSYS;
+ return wait_handler(timeout);
}
-void cfs_init_completion(cfs_completion_t *c)
+void init_completion(struct completion *c)
{
- LASSERT(c != NULL);
- c->done = 0;
- cfs_waitq_init(&c->wait);
+ LASSERT(c != NULL);
+ c->done = 0;
+ cfs_waitq_init(&c->wait);
}
-void cfs_complete(cfs_completion_t *c)
+void complete(struct completion *c)
{
- LASSERT(c != NULL);
- c->done = 1;
- cfs_waitq_signal(&c->wait);
+ LASSERT(c != NULL);
+ c->done = 1;
+ cfs_waitq_signal(&c->wait);
}
-void cfs_wait_for_completion(cfs_completion_t *c)
+void wait_for_completion(struct completion *c)
{
- LASSERT(c != NULL);
- do {
- if (cfs_call_wait_handler(1000) < 0)
- break;
- } while (c->done == 0);
+ LASSERT(c != NULL);
+ do {
+ if (call_wait_handler(1000) < 0)
+ break;
+ } while (c->done == 0);
}
-int cfs_wait_for_completion_interruptible(cfs_completion_t *c)
+int wait_for_completion_interruptible(struct completion *c)
{
- LASSERT(c != NULL);
- do {
- if (cfs_call_wait_handler(1000) < 0)
- break;
- } while (c->done == 0);
- return 0;
+ LASSERT(c != NULL);
+ do {
+ if (call_wait_handler(1000) < 0)
+ break;
+ } while (c->done == 0);
+ return 0;
}
/*
* - up_write(x)
*/
-void cfs_init_rwsem(cfs_rw_semaphore_t *s)
+void init_rwsem(struct rw_semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
+ LASSERT(s != NULL);
+ (void)s;
}
-void cfs_down_read(cfs_rw_semaphore_t *s)
+void down_read(struct rw_semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
+ LASSERT(s != NULL);
+ (void)s;
}
-int cfs_down_read_trylock(cfs_rw_semaphore_t *s)
+int down_read_trylock(struct rw_semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
+ LASSERT(s != NULL);
+ (void)s;
return 1;
}
-void cfs_down_write(cfs_rw_semaphore_t *s)
+void down_write(struct rw_semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
+ LASSERT(s != NULL);
+ (void)s;
}
-int cfs_down_write_trylock(cfs_rw_semaphore_t *s)
+int down_write_trylock(struct rw_semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
+ LASSERT(s != NULL);
+ (void)s;
return 1;
}
-void cfs_up_read(cfs_rw_semaphore_t *s)
+void up_read(struct rw_semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
+ LASSERT(s != NULL);
+ (void)s;
}
-void cfs_up_write(cfs_rw_semaphore_t *s)
+void up_write(struct rw_semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
+ LASSERT(s != NULL);
+ (void)s;
}
-void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
+void fini_rwsem(struct rw_semaphore *s)
{
- LASSERT(s != NULL);
- (void)s;
+ LASSERT(s != NULL);
+ (void)s;
}
#ifdef HAVE_LIBPTHREAD
* Multi-threaded user space completion
*/
-void cfs_mt_init_completion(cfs_mt_completion_t *c)
+void mt_init_completion(mt_completion_t *c)
{
LASSERT(c != NULL);
c->c_done = 0;
pthread_cond_init(&c->c_cond, NULL);
}
-void cfs_mt_fini_completion(cfs_mt_completion_t *c)
+void mt_fini_completion(mt_completion_t *c)
{
LASSERT(c != NULL);
pthread_mutex_destroy(&c->c_mut);
pthread_cond_destroy(&c->c_cond);
}
-void cfs_mt_complete(cfs_mt_completion_t *c)
+void mt_complete(mt_completion_t *c)
{
LASSERT(c != NULL);
pthread_mutex_lock(&c->c_mut);
pthread_mutex_unlock(&c->c_mut);
}
-void cfs_mt_wait_for_completion(cfs_mt_completion_t *c)
+void mt_wait_for_completion(mt_completion_t *c)
{
LASSERT(c != NULL);
pthread_mutex_lock(&c->c_mut);
static pthread_mutex_t atomic_guard_lock = PTHREAD_MUTEX_INITIALIZER;
-int cfs_mt_atomic_read(cfs_mt_atomic_t *a)
+int mt_atomic_read(mt_atomic_t *a)
{
int r;
return r;
}
-void cfs_mt_atomic_set(cfs_mt_atomic_t *a, int b)
+void mt_atomic_set(mt_atomic_t *a, int b)
{
pthread_mutex_lock(&atomic_guard_lock);
a->counter = b;
pthread_mutex_unlock(&atomic_guard_lock);
}
-int cfs_mt_atomic_dec_and_test(cfs_mt_atomic_t *a)
+int mt_atomic_dec_and_test(mt_atomic_t *a)
{
int r;
return (r == 0);
}
-void cfs_mt_atomic_inc(cfs_mt_atomic_t *a)
+void mt_atomic_inc(mt_atomic_t *a)
{
pthread_mutex_lock(&atomic_guard_lock);
++a->counter;
pthread_mutex_unlock(&atomic_guard_lock);
}
-void cfs_mt_atomic_dec(cfs_mt_atomic_t *a)
+void mt_atomic_dec(mt_atomic_t *a)
{
pthread_mutex_lock(&atomic_guard_lock);
--a->counter;
pthread_mutex_unlock(&atomic_guard_lock);
}
-void cfs_mt_atomic_add(int b, cfs_mt_atomic_t *a)
+void mt_atomic_add(int b, mt_atomic_t *a)
{
pthread_mutex_lock(&atomic_guard_lock);
pthread_mutex_unlock(&atomic_guard_lock);
}
-void cfs_mt_atomic_sub(int b, cfs_mt_atomic_t *a)
+void mt_atomic_sub(int b, mt_atomic_t *a)
{
pthread_mutex_lock(&atomic_guard_lock);
a->counter -= b;
(void)link;
/* well, wait for something to happen */
- cfs_call_wait_handler(0);
+ call_wait_handler(0);
}
int64_t cfs_waitq_timedwait(struct cfs_waitlink *link, cfs_task_state_t state,
{
LASSERT(link != NULL);
(void)link;
- cfs_call_wait_handler(timeout);
+ call_wait_handler(timeout);
return 0;
}
#include "tracefile.h"
struct lc_watchdog {
- cfs_spinlock_t lcw_lock; /* check or change lcw_list */
+ spinlock_t lcw_lock; /* check or change lcw_list */
int lcw_refcount; /* must hold lcw_pending_timers_lock */
cfs_timer_t lcw_timer; /* kernel timer */
cfs_list_t lcw_list; /* chain on pending list */
* and lcw_stop_completion when it exits.
* Wake lcw_event_waitq to signal timer callback dispatches.
*/
-static cfs_completion_t lcw_start_completion;
-static cfs_completion_t lcw_stop_completion;
+static struct completion lcw_start_completion;
+static struct completion lcw_stop_completion;
static cfs_waitq_t lcw_event_waitq;
/*
* When it hits 0, we stop the dispatcher.
*/
static __u32 lcw_refcount = 0;
-static CFS_DEFINE_MUTEX(lcw_refcount_mutex);
+static DEFINE_MUTEX(lcw_refcount_mutex);
/*
* List of timers that have fired that need their callbacks run by the
{
ENTRY;
#if defined(HAVE_TASKLIST_LOCK)
- cfs_read_lock(&tasklist_lock);
+ read_lock(&tasklist_lock);
#else
rcu_read_lock();
#endif
}
#if defined(HAVE_TASKLIST_LOCK)
- cfs_read_unlock(&tasklist_lock);
+ read_unlock(&tasklist_lock);
#else
rcu_read_unlock();
#endif
lcw->lcw_state = LC_WATCHDOG_EXPIRED;
- cfs_spin_lock_bh(&lcw->lcw_lock);
- LASSERT(cfs_list_empty(&lcw->lcw_list));
+ spin_lock_bh(&lcw->lcw_lock);
+ LASSERT(cfs_list_empty(&lcw->lcw_list));
- cfs_spin_lock_bh(&lcw_pending_timers_lock);
- lcw->lcw_refcount++; /* +1 for pending list */
- cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
- cfs_waitq_signal(&lcw_event_waitq);
+ spin_lock_bh(&lcw_pending_timers_lock);
+ lcw->lcw_refcount++; /* +1 for pending list */
+ cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
+ cfs_waitq_signal(&lcw_event_waitq);
- cfs_spin_unlock_bh(&lcw_pending_timers_lock);
- cfs_spin_unlock_bh(&lcw->lcw_lock);
- EXIT;
+ spin_unlock_bh(&lcw_pending_timers_lock);
+ spin_unlock_bh(&lcw->lcw_lock);
+ EXIT;
}
static int is_watchdog_fired(void)
{
- int rc;
+ int rc;
- if (cfs_test_bit(LCW_FLAG_STOP, &lcw_flags))
- return 1;
+ if (test_bit(LCW_FLAG_STOP, &lcw_flags))
+ return 1;
- cfs_spin_lock_bh(&lcw_pending_timers_lock);
- rc = !cfs_list_empty(&lcw_pending_timers);
- cfs_spin_unlock_bh(&lcw_pending_timers_lock);
- return rc;
+ spin_lock_bh(&lcw_pending_timers_lock);
+ rc = !cfs_list_empty(&lcw_pending_timers);
+ spin_unlock_bh(&lcw_pending_timers_lock);
+ return rc;
}
static void lcw_dump_stack(struct lc_watchdog *lcw)
RECALC_SIGPENDING;
SIGNAL_MASK_UNLOCK(current, flags);
- cfs_complete(&lcw_start_completion);
+ complete(&lcw_start_completion);
while (1) {
int dumplog = 1;
cfs_wait_event_interruptible(lcw_event_waitq,
is_watchdog_fired(), rc);
CDEBUG(D_INFO, "Watchdog got woken up...\n");
- if (cfs_test_bit(LCW_FLAG_STOP, &lcw_flags)) {
- CDEBUG(D_INFO, "LCW_FLAG_STOP was set, shutting down...\n");
-
- cfs_spin_lock_bh(&lcw_pending_timers_lock);
- rc = !cfs_list_empty(&lcw_pending_timers);
- cfs_spin_unlock_bh(&lcw_pending_timers_lock);
- if (rc) {
- CERROR("pending timers list was not empty at "
- "time of watchdog dispatch shutdown\n");
- }
- break;
- }
-
- cfs_spin_lock_bh(&lcw_pending_timers_lock);
+ if (test_bit(LCW_FLAG_STOP, &lcw_flags)) {
+ CDEBUG(D_INFO, "LCW_FLAG_STOP set, shutting down...\n");
+
+ spin_lock_bh(&lcw_pending_timers_lock);
+ rc = !cfs_list_empty(&lcw_pending_timers);
+ spin_unlock_bh(&lcw_pending_timers_lock);
+ if (rc) {
+ CERROR("pending timers list was not empty at "
+ "time of watchdog dispatch shutdown\n");
+ }
+ break;
+ }
+
+ spin_lock_bh(&lcw_pending_timers_lock);
while (!cfs_list_empty(&lcw_pending_timers)) {
int is_dumplog;
/* +1 ref for callback to make sure lwc wouldn't be
* deleted after releasing lcw_pending_timers_lock */
lcw->lcw_refcount++;
- cfs_spin_unlock_bh(&lcw_pending_timers_lock);
-
- /* lock ordering */
- cfs_spin_lock_bh(&lcw->lcw_lock);
- cfs_spin_lock_bh(&lcw_pending_timers_lock);
-
- if (cfs_list_empty(&lcw->lcw_list)) {
- /* already removed from pending list */
- lcw->lcw_refcount--; /* -1 ref for callback */
- if (lcw->lcw_refcount == 0)
- cfs_list_add(&lcw->lcw_list, &zombies);
- cfs_spin_unlock_bh(&lcw->lcw_lock);
+ spin_unlock_bh(&lcw_pending_timers_lock);
+
+ /* lock ordering */
+ spin_lock_bh(&lcw->lcw_lock);
+ spin_lock_bh(&lcw_pending_timers_lock);
+
+ if (cfs_list_empty(&lcw->lcw_list)) {
+ /* already removed from pending list */
+ lcw->lcw_refcount--; /* -1 ref for callback */
+ if (lcw->lcw_refcount == 0)
+ cfs_list_add(&lcw->lcw_list, &zombies);
+ spin_unlock_bh(&lcw->lcw_lock);
/* still hold lcw_pending_timers_lock */
continue;
}
cfs_list_del_init(&lcw->lcw_list);
lcw->lcw_refcount--; /* -1 ref for pending list */
- cfs_spin_unlock_bh(&lcw_pending_timers_lock);
- cfs_spin_unlock_bh(&lcw->lcw_lock);
+ spin_unlock_bh(&lcw_pending_timers_lock);
+ spin_unlock_bh(&lcw->lcw_lock);
CDEBUG(D_INFO, "found lcw for pid " LPPID "\n",
lcw->lcw_pid);
dumplog = 0;
}
- cfs_spin_lock_bh(&lcw_pending_timers_lock);
- lcw->lcw_refcount--; /* -1 ref for callback */
- if (lcw->lcw_refcount == 0)
- cfs_list_add(&lcw->lcw_list, &zombies);
- }
- cfs_spin_unlock_bh(&lcw_pending_timers_lock);
+ spin_lock_bh(&lcw_pending_timers_lock);
+ lcw->lcw_refcount--; /* -1 ref for callback */
+ if (lcw->lcw_refcount == 0)
+ cfs_list_add(&lcw->lcw_list, &zombies);
+ }
+ spin_unlock_bh(&lcw_pending_timers_lock);
while (!cfs_list_empty(&zombies)) {
lcw = cfs_list_entry(lcw_pending_timers.next,
}
}
- cfs_complete(&lcw_stop_completion);
+ complete(&lcw_stop_completion);
- RETURN(rc);
+ RETURN(rc);
}
static void lcw_dispatch_start(void)
{
- int rc;
+ int rc;
- ENTRY;
- LASSERT(lcw_refcount == 1);
+ ENTRY;
+ LASSERT(lcw_refcount == 1);
- cfs_init_completion(&lcw_stop_completion);
- cfs_init_completion(&lcw_start_completion);
+ init_completion(&lcw_stop_completion);
+ init_completion(&lcw_start_completion);
cfs_waitq_init(&lcw_event_waitq);
CDEBUG(D_INFO, "starting dispatch thread\n");
EXIT;
return;
}
- cfs_wait_for_completion(&lcw_start_completion);
- CDEBUG(D_INFO, "watchdog dispatcher initialization complete.\n");
+ wait_for_completion(&lcw_start_completion);
+ CDEBUG(D_INFO, "watchdog dispatcher initialization complete.\n");
- EXIT;
+ EXIT;
}
static void lcw_dispatch_stop(void)
{
- ENTRY;
- LASSERT(lcw_refcount == 0);
+ ENTRY;
+ LASSERT(lcw_refcount == 0);
- CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n");
+ CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n");
- cfs_set_bit(LCW_FLAG_STOP, &lcw_flags);
- cfs_waitq_signal(&lcw_event_waitq);
+ set_bit(LCW_FLAG_STOP, &lcw_flags);
+ cfs_waitq_signal(&lcw_event_waitq);
- cfs_wait_for_completion(&lcw_stop_completion);
+ wait_for_completion(&lcw_stop_completion);
- CDEBUG(D_INFO, "watchdog dispatcher has shut down.\n");
+ CDEBUG(D_INFO, "watchdog dispatcher has shut down.\n");
- EXIT;
+ EXIT;
}
struct lc_watchdog *lc_watchdog_add(int timeout,
RETURN(ERR_PTR(-ENOMEM));
}
- cfs_spin_lock_init(&lcw->lcw_lock);
+ spin_lock_init(&lcw->lcw_lock);
lcw->lcw_refcount = 1; /* refcount for owner */
lcw->lcw_task = cfs_current();
lcw->lcw_pid = cfs_curproc_pid();
CFS_INIT_LIST_HEAD(&lcw->lcw_list);
cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);
- cfs_mutex_lock(&lcw_refcount_mutex);
- if (++lcw_refcount == 1)
- lcw_dispatch_start();
- cfs_mutex_unlock(&lcw_refcount_mutex);
+ mutex_lock(&lcw_refcount_mutex);
+ if (++lcw_refcount == 1)
+ lcw_dispatch_start();
+ mutex_unlock(&lcw_refcount_mutex);
/* Keep this working in case we enable them by default */
if (lcw->lcw_state == LC_WATCHDOG_ENABLED) {
static void lc_watchdog_del_pending(struct lc_watchdog *lcw)
{
- cfs_spin_lock_bh(&lcw->lcw_lock);
- if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
- cfs_spin_lock_bh(&lcw_pending_timers_lock);
- cfs_list_del_init(&lcw->lcw_list);
- lcw->lcw_refcount--; /* -1 ref for pending list */
- cfs_spin_unlock_bh(&lcw_pending_timers_lock);
- }
-
- cfs_spin_unlock_bh(&lcw->lcw_lock);
+ spin_lock_bh(&lcw->lcw_lock);
+ if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
+ spin_lock_bh(&lcw_pending_timers_lock);
+ cfs_list_del_init(&lcw->lcw_list);
+ lcw->lcw_refcount--; /* -1 ref for pending list */
+ spin_unlock_bh(&lcw_pending_timers_lock);
+ }
+
+ spin_unlock_bh(&lcw->lcw_lock);
}
void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout)
lcw_update_time(lcw, "stopped");
- cfs_spin_lock_bh(&lcw->lcw_lock);
- cfs_spin_lock_bh(&lcw_pending_timers_lock);
- if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
- cfs_list_del_init(&lcw->lcw_list);
- lcw->lcw_refcount--; /* -1 ref for pending list */
- }
+ spin_lock_bh(&lcw->lcw_lock);
+ spin_lock_bh(&lcw_pending_timers_lock);
+ if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
+ cfs_list_del_init(&lcw->lcw_list);
+ lcw->lcw_refcount--; /* -1 ref for pending list */
+ }
- lcw->lcw_refcount--; /* -1 ref for owner */
- dead = lcw->lcw_refcount == 0;
- cfs_spin_unlock_bh(&lcw_pending_timers_lock);
- cfs_spin_unlock_bh(&lcw->lcw_lock);
+ lcw->lcw_refcount--; /* -1 ref for owner */
+ dead = lcw->lcw_refcount == 0;
+ spin_unlock_bh(&lcw_pending_timers_lock);
+ spin_unlock_bh(&lcw->lcw_lock);
- if (dead)
- LIBCFS_FREE(lcw, sizeof(*lcw));
+ if (dead)
+ LIBCFS_FREE(lcw, sizeof(*lcw));
- cfs_mutex_lock(&lcw_refcount_mutex);
- if (--lcw_refcount == 0)
- lcw_dispatch_stop();
- cfs_mutex_unlock(&lcw_refcount_mutex);
+ mutex_lock(&lcw_refcount_mutex);
+ if (--lcw_refcount == 0)
+ lcw_dispatch_stop();
+ mutex_unlock(&lcw_refcount_mutex);
- EXIT;
+ EXIT;
}
EXPORT_SYMBOL(lc_watchdog_delete);
PLIST_ENTRY ListEntry = NULL;
PTASK_SLOT TaskSlot = NULL;
- cfs_spin_lock(&(cfs_win_task_manger.Lock));
+ spin_lock(&(cfs_win_task_manger.Lock));
ListEntry = cfs_win_task_manger.TaskList.Flink;
while (ListEntry != (&(cfs_win_task_manger.TaskList))) {
ListEntry = ListEntry->Flink;
}
- cfs_spin_unlock(&(cfs_win_task_manger.Lock));
+ spin_unlock(&(cfs_win_task_manger.Lock));
}
int
cfs_win_task_manger.Magic = TASKMAN_MAGIC;
/* initialize the spinlock protection */
- cfs_spin_lock_init(&cfs_win_task_manger.Lock);
+ spin_lock_init(&cfs_win_task_manger.Lock);
/* create slab memory cache */
cfs_win_task_manger.slab = cfs_mem_cache_create(
}
/* cleanup all the taskslots attached to the list */
- cfs_spin_lock(&(cfs_win_task_manger.Lock));
+ spin_lock(&(cfs_win_task_manger.Lock));
while (!IsListEmpty(&(cfs_win_task_manger.TaskList))) {
cleanup_task_slot(TaskSlot);
}
- cfs_spin_unlock(&cfs_win_task_manger.Lock);
+ spin_unlock(&cfs_win_task_manger.Lock);
/* destroy the taskslot cache slab */
cfs_mem_cache_destroy(cfs_win_task_manger.slab);
PLIST_ENTRY ListEntry = NULL;
PTASK_SLOT TaskSlot = NULL;
- cfs_spin_lock(&(cfs_win_task_manger.Lock));
+ spin_lock(&(cfs_win_task_manger.Lock));
ListEntry = cfs_win_task_manger.TaskList.Flink;
while (ListEntry != (&(cfs_win_task_manger.TaskList))) {
errorout:
- cfs_spin_unlock(&(cfs_win_task_manger.Lock));
+ spin_unlock(&(cfs_win_task_manger.Lock));
if (!TaskSlot) {
cfs_enter_debugger();
return cfs_atomic_add_return(-i, v);
}
-int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, cfs_spinlock_t *lock)
+int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, spinlock_t *lock)
{
- if (cfs_atomic_read(v) != 1) {
- return 0;
- }
+ if (cfs_atomic_read(v) != 1)
+ return 0;
- cfs_spin_lock(lock);
+ spin_lock(lock);
if (cfs_atomic_dec_and_test(v))
return 1;
- cfs_spin_unlock(lock);
+ spin_unlock(lock);
return 0;
}
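/*
 * Illustrative sketch only, not part of the patch: the usual caller-side
 * pattern for cfs_atomic_dec_and_lock().  "struct obj", "obj_list_lock",
 * "obj_free" and the o->ref/o->link fields are hypothetical names.  The
 * point it shows: the common path drops a reference without touching the
 * lock, and only the final put returns nonzero, with the lock held and the
 * count at zero, so the teardown runs under the lock.
 */
static void obj_put(struct obj *o)
{
	if (cfs_atomic_dec_and_lock(&o->ref, &obj_list_lock)) {
		/* lock is held here and the refcount reached zero */
		cfs_list_del(&o->link);
		spin_unlock(&obj_list_lock);
		obj_free(o);
	}
}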
void
-cfs_rwlock_init(cfs_rwlock_t * rwlock)
+rwlock_init(rwlock_t *rwlock)
{
- cfs_spin_lock_init(&rwlock->guard);
- rwlock->count = 0;
+ spin_lock_init(&rwlock->guard);
+ rwlock->count = 0;
}
void
-cfs_rwlock_fini(cfs_rwlock_t * rwlock)
+cfs_rwlock_fini(rwlock_t *rwlock)
{
}
void
-cfs_read_lock(cfs_rwlock_t * rwlock)
+read_lock(rwlock_t *rwlock)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
slot->irql = KeRaiseIrqlToDpcLevel();
- while (TRUE) {
- cfs_spin_lock(&rwlock->guard);
- if (rwlock->count >= 0)
- break;
- cfs_spin_unlock(&rwlock->guard);
- }
+ while (TRUE) {
+ spin_lock(&rwlock->guard);
+ if (rwlock->count >= 0)
+ break;
+ spin_unlock(&rwlock->guard);
+ }
rwlock->count++;
- cfs_spin_unlock(&rwlock->guard);
+ spin_unlock(&rwlock->guard);
}
void
-cfs_read_unlock(cfs_rwlock_t * rwlock)
+read_unlock(rwlock_t *rwlock)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
slot = CONTAINING_RECORD(task, TASK_SLOT, task);
ASSERT(slot->Magic == TASKSLT_MAGIC);
-
- cfs_spin_lock(&rwlock->guard);
+
+ spin_lock(&rwlock->guard);
ASSERT(rwlock->count > 0);
- rwlock->count--;
- if (rwlock < 0) {
- cfs_enter_debugger();
- }
- cfs_spin_unlock(&rwlock->guard);
+ rwlock->count--;
+ if (rwlock->count < 0)
+ cfs_enter_debugger();
+ spin_unlock(&rwlock->guard);
- KeLowerIrql(slot->irql);
+ KeLowerIrql(slot->irql);
}
void
-cfs_write_lock(cfs_rwlock_t * rwlock)
+write_lock(rwlock_t *rwlock)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
slot->irql = KeRaiseIrqlToDpcLevel();
- while (TRUE) {
- cfs_spin_lock(&rwlock->guard);
- if (rwlock->count == 0)
- break;
- cfs_spin_unlock(&rwlock->guard);
- }
+ while (TRUE) {
+ spin_lock(&rwlock->guard);
+ if (rwlock->count == 0)
+ break;
+ spin_unlock(&rwlock->guard);
+ }
rwlock->count = -1;
- cfs_spin_unlock(&rwlock->guard);
+ spin_unlock(&rwlock->guard);
}
void
-cfs_write_unlock(cfs_rwlock_t * rwlock)
+write_unlock(rwlock_t *rwlock)
{
cfs_task_t * task = cfs_current();
PTASK_SLOT slot = NULL;
slot = CONTAINING_RECORD(task, TASK_SLOT, task);
ASSERT(slot->Magic == TASKSLT_MAGIC);
-
- cfs_spin_lock(&rwlock->guard);
+
+ spin_lock(&rwlock->guard);
ASSERT(rwlock->count == -1);
- rwlock->count = 0;
- cfs_spin_unlock(&rwlock->guard);
+ rwlock->count = 0;
+ spin_unlock(&rwlock->guard);
- KeLowerIrql(slot->irql);
+ KeLowerIrql(slot->irql);
}
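/*
 * Illustrative sketch only, not part of the patch: a try-read-lock built on
 * the same convention this port uses for rwlock_t, where guard protects
 * count, count > 0 means that many readers, count == -1 means one writer,
 * and 0 means free.  Unlike read_lock() above, the sketch does not raise
 * the IRQL; the function name is hypothetical.
 */
static int read_trylock_sketch(rwlock_t *rwlock)
{
	int got = 0;

	spin_lock(&rwlock->guard);
	if (rwlock->count >= 0) {	/* no writer currently holds it */
		rwlock->count++;
		got = 1;
	}
	spin_unlock(&rwlock->guard);
	return got;
}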
pg->addr = (void *)((__u64)addr & (~((__u64)PAGE_SIZE-1)));
pg->mapping = addr;
cfs_atomic_set(&pg->count, 1);
- cfs_set_bit(PG_virt, &(pg->flags));
+ set_bit(PG_virt, &(pg->flags));
cfs_enter_debugger();
return pg;
}
ASSERT(pg->addr != NULL);
ASSERT(cfs_atomic_read(&pg->count) <= 1);
- if (!cfs_test_bit(PG_virt, &pg->flags)) {
+ if (!test_bit(PG_virt, &pg->flags)) {
cfs_mem_cache_free(cfs_page_p_slab, pg->addr);
cfs_atomic_dec(&libcfs_total_pages);
} else {
ExFreeToNPagedLookasideList(&(kmc->npll), buf);
}
-cfs_spinlock_t shrinker_guard = {0};
+spinlock_t shrinker_guard = {0};
CFS_LIST_HEAD(shrinker_hdr);
cfs_timer_t shrinker_timer = {0};
{
struct cfs_shrinker * s = (struct cfs_shrinker *)
cfs_alloc(sizeof(struct cfs_shrinker), CFS_ALLOC_ZERO);
- if (s) {
- s->cb = cb;
- s->seeks = seeks;
- s->nr = 2;
- cfs_spin_lock(&shrinker_guard);
- cfs_list_add(&s->list, &shrinker_hdr);
- cfs_spin_unlock(&shrinker_guard);
- }
-
- return s;
+ if (s) {
+ s->cb = cb;
+ s->seeks = seeks;
+ s->nr = 2;
+ spin_lock(&shrinker_guard);
+ cfs_list_add(&s->list, &shrinker_hdr);
+ spin_unlock(&shrinker_guard);
+ }
+
+ return s;
}
void cfs_remove_shrinker(struct cfs_shrinker *s)
{
- struct cfs_shrinker *tmp;
- cfs_spin_lock(&shrinker_guard);
+ struct cfs_shrinker *tmp;
+ spin_lock(&shrinker_guard);
#if TRUE
cfs_list_for_each_entry_typed(tmp, &shrinker_hdr,
struct cfs_shrinker, list) {
#else
cfs_list_del(&s->list);
#endif
- cfs_spin_unlock(&shrinker_guard);
- cfs_free(s);
+ spin_unlock(&shrinker_guard);
+ cfs_free(s);
}
/* time ut test proc */
void shrinker_timer_proc(ulong_ptr_t arg)
{
- struct cfs_shrinker *s;
- cfs_spin_lock(&shrinker_guard);
-
- cfs_list_for_each_entry_typed(s, &shrinker_hdr,
- struct cfs_shrinker, list) {
- s->cb(s->nr, __GFP_FS);
- }
- cfs_spin_unlock(&shrinker_guard);
- cfs_timer_arm(&shrinker_timer, 300);
+ struct cfs_shrinker *s;
+ spin_lock(&shrinker_guard);
+
+ cfs_list_for_each_entry_typed(s, &shrinker_hdr,
+ struct cfs_shrinker, list) {
+ s->cb(s->nr, __GFP_FS);
+ }
+ spin_unlock(&shrinker_guard);
+ cfs_timer_arm(&shrinker_timer, 300);
}
int start_shrinker_timer()
*/
-static CFS_DECLARE_RWSEM(cfs_symbol_lock);
+static DECLARE_RWSEM(cfs_symbol_lock);
CFS_LIST_HEAD(cfs_symbol_list);
int libcfs_is_mp_system = FALSE;
cfs_list_t *walker;
struct cfs_symbol *sym = NULL;
- cfs_down_read(&cfs_symbol_lock);
+ down_read(&cfs_symbol_lock);
cfs_list_for_each(walker, &cfs_symbol_list) {
sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
if (!strcmp(sym->name, name)) {
break;
}
}
- cfs_up_read(&cfs_symbol_lock);
+ up_read(&cfs_symbol_lock);
if (sym != NULL)
return sym->value;
cfs_list_t *walker;
struct cfs_symbol *sym = NULL;
- cfs_down_read(&cfs_symbol_lock);
+ down_read(&cfs_symbol_lock);
cfs_list_for_each(walker, &cfs_symbol_list) {
sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
if (!strcmp(sym->name, name)) {
break;
}
}
- cfs_up_read(&cfs_symbol_lock);
+ up_read(&cfs_symbol_lock);
LASSERT(sym != NULL);
}
new->ref = 0;
CFS_INIT_LIST_HEAD(&new->sym_list);
- cfs_down_write(&cfs_symbol_lock);
- cfs_list_for_each(walker, &cfs_symbol_list) {
- sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
- if (!strcmp(sym->name, name)) {
- cfs_up_write(&cfs_symbol_lock);
- cfs_free(new);
- return 0; // alreay registerred
- }
- }
- cfs_list_add_tail(&new->sym_list, &cfs_symbol_list);
- cfs_up_write(&cfs_symbol_lock);
+ down_write(&cfs_symbol_lock);
+ cfs_list_for_each(walker, &cfs_symbol_list) {
+ sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
+ if (!strcmp(sym->name, name)) {
+ up_write(&cfs_symbol_lock);
+ cfs_free(new);
+ return 0; /* already registered */
+ }
+ }
+ cfs_list_add_tail(&new->sym_list, &cfs_symbol_list);
+ up_write(&cfs_symbol_lock);
return 0;
}
cfs_list_t *nxt;
struct cfs_symbol *sym = NULL;
- cfs_down_write(&cfs_symbol_lock);
+ down_write(&cfs_symbol_lock);
cfs_list_for_each_safe(walker, nxt, &cfs_symbol_list) {
sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
if (!strcmp(sym->name, name)) {
break;
}
}
- cfs_up_write(&cfs_symbol_lock);
+ up_write(&cfs_symbol_lock);
}
/*
cfs_list_t *walker;
struct cfs_symbol *sym = NULL;
- cfs_down_write(&cfs_symbol_lock);
- cfs_list_for_each(walker, &cfs_symbol_list) {
- sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
- LASSERT(sym->ref == 0);
- cfs_list_del (&sym->sym_list);
- cfs_free(sym);
- }
- cfs_up_write(&cfs_symbol_lock);
- return;
+ down_write(&cfs_symbol_lock);
+ cfs_list_for_each(walker, &cfs_symbol_list) {
+ sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
+ LASSERT(sym->ref == 0);
+ cfs_list_del (&sym->sym_list);
+ cfs_free(sym);
+ }
+ up_write(&cfs_symbol_lock);
+ return;
}
int
libcfs_arch_init(void)
{
- int rc;
+ int rc;
+ spinlock_t lock;
- cfs_spinlock_t lock;
- /* Workground to check the system is MP build or UP build */
- cfs_spin_lock_init(&lock);
- cfs_spin_lock(&lock);
- libcfs_is_mp_system = (int)lock.lock;
- /* MP build system: it's a real spin, for UP build system, it
- only raises the IRQL to DISPATCH_LEVEL */
- cfs_spin_unlock(&lock);
+ /* Workaround to check whether the system is an MP build or UP build */
+ spin_lock_init(&lock);
+ spin_lock(&lock);
+ libcfs_is_mp_system = (int)lock.lock;
+ /* On an MP build this is a real spin; on a UP build it only
+ * raises the IRQL to DISPATCH_LEVEL */
+ spin_unlock(&lock);
/* initialize libc routines (confliction between libcnptr.lib
and kernel ntoskrnl.lib) */
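/*
 * Illustrative sketch only, not part of the patch: the MP/UP probe above,
 * factored into a helper with a hypothetical name.  It relies on the
 * Windows spinlock layout used by this port: on an MP build spin_lock()
 * leaves lock.lock non-zero (a real spin), while a UP build only raises
 * the IRQL and leaves the word at zero.
 */
static int libcfs_probe_mp_build(void)
{
	spinlock_t lock;
	int is_mp;

	spin_lock_init(&lock);
	spin_lock(&lock);
	is_mp = (lock.lock != 0);
	spin_unlock(&lock);
	return is_mp;
}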
/* The global lock to protect all the access */
#if LIBCFS_PROCFS_SPINLOCK
-cfs_spinlock_t proc_fs_lock;
+spinlock_t proc_fs_lock;
-#define INIT_PROCFS_LOCK() cfs_spin_lock_init(&proc_fs_lock)
-#define LOCK_PROCFS() cfs_spin_lock(&proc_fs_lock)
-#define UNLOCK_PROCFS() cfs_spin_unlock(&proc_fs_lock)
+#define INIT_PROCFS_LOCK() spin_lock_init(&proc_fs_lock)
+#define LOCK_PROCFS() spin_lock(&proc_fs_lock)
+#define UNLOCK_PROCFS() spin_unlock(&proc_fs_lock)
#else
-cfs_mutex_t proc_fs_lock;
+struct mutex proc_fs_lock;
#define INIT_PROCFS_LOCK() cfs_init_mutex(&proc_fs_lock)
#define LOCK_PROCFS() cfs_mutex_down(&proc_fs_lock)
file->private_data = p;
}
memset(p, 0, sizeof(*p));
- cfs_mutex_init(&p->lock);
+ mutex_init(&p->lock);
p->op = op;
/*
void *p;
int err = 0;
- cfs_mutex_lock(&m->lock);
+ mutex_lock(&m->lock);
/*
* seq_file->op->..m_start/m_stop/m_next may do special actions
* or optimisations based on the file->f_version, so we want to
else
*ppos += copied;
file->f_version = m->version;
- cfs_mutex_unlock(&m->lock);
+ mutex_unlock(&m->lock);
return copied;
Enomem:
err = -ENOMEM;
struct seq_file *m = (struct seq_file *)file->private_data;
long long retval = -EINVAL;
- cfs_mutex_lock(&m->lock);
+ mutex_lock(&m->lock);
m->version = file->f_version;
switch (origin) {
case 1:
}
}
file->f_version = m->version;
- cfs_mutex_unlock(&m->lock);
+ mutex_unlock(&m->lock);
return retval;
}
EXPORT_SYMBOL(seq_lseek);
waitq->magic = CFS_WAITQ_MAGIC;
waitq->flags = 0;
CFS_INIT_LIST_HEAD(&(waitq->waiters));
- cfs_spin_lock_init(&(waitq->guard));
+ spin_lock_init(&(waitq->guard));
}
/*
LASSERT(link->magic == CFS_WAITLINK_MAGIC);
LASSERT(waitqid < CFS_WAITQ_CHANNELS);
- cfs_spin_lock(&(waitq->guard));
+ spin_lock(&(waitq->guard));
LASSERT(link->waitq[waitqid].waitq == NULL);
link->waitq[waitqid].waitq = waitq;
if (link->flags & CFS_WAITQ_EXCLUSIVE) {
} else {
cfs_list_add(&link->waitq[waitqid].link, &waitq->waiters);
}
- cfs_spin_unlock(&(waitq->guard));
+ spin_unlock(&(waitq->guard));
}
/*
* cfs_waitq_add
LASSERT(waitq->magic == CFS_WAITQ_MAGIC);
LASSERT(link->magic == CFS_WAITLINK_MAGIC);
- cfs_spin_lock(&(waitq->guard));
+ spin_lock(&(waitq->guard));
for (i=0; i < CFS_WAITQ_CHANNELS; i++) {
if (link->waitq[i].waitq == waitq)
cfs_enter_debugger();
}
- cfs_spin_unlock(&(waitq->guard));
+ spin_unlock(&(waitq->guard));
}
/*
LASSERT(waitq != NULL);
LASSERT(waitq->magic == CFS_WAITQ_MAGIC);
- cfs_spin_lock(&waitq->guard);
+ spin_lock(&waitq->guard);
cfs_list_for_each_entry_typed(scan, &waitq->waiters,
cfs_waitlink_channel_t,
link) {
break;
}
- cfs_spin_unlock(&waitq->guard);
- return;
+ spin_unlock(&waitq->guard);
+ return;
}
/*
{
PKS_TSDU KsTsdu = NULL;
- cfs_spin_lock(&(ks_data.ksnd_tsdu_lock));
+ spin_lock(&(ks_data.ksnd_tsdu_lock));
if (!cfs_list_empty (&(ks_data.ksnd_freetsdus))) {
ks_data.ksnd_tsdu_slab, 0);
}
- cfs_spin_unlock(&(ks_data.ksnd_tsdu_lock));
+ spin_unlock(&(ks_data.ksnd_tsdu_lock));
if (NULL != KsTsdu) {
RtlZeroMemory(KsTsdu, ks_data.ksnd_tsdu_size);
PKS_TSDU KsTsdu
)
{
- cfs_spin_lock(&(ks_data.ksnd_tsdu_lock));
- if (ks_data.ksnd_nfreetsdus > 128) {
- KsFreeKsTsdu(KsTsdu);
- } else {
- cfs_list_add_tail( &(KsTsdu->Link), &(ks_data.ksnd_freetsdus));
- ks_data.ksnd_nfreetsdus++;
- }
- cfs_spin_unlock(&(ks_data.ksnd_tsdu_lock));
+ spin_lock(&(ks_data.ksnd_tsdu_lock));
+ if (ks_data.ksnd_nfreetsdus > 128) {
+ KsFreeKsTsdu(KsTsdu);
+ } else {
+ cfs_list_add_tail( &(KsTsdu->Link), &(ks_data.ksnd_freetsdus));
+ ks_data.ksnd_nfreetsdus++;
+ }
+ spin_unlock(&(ks_data.ksnd_tsdu_lock));
}
/* with tconn lock acquired */
TsduMgr->NumOfTsdu = 0;
TsduMgr->TotalBytes = 0;
- cfs_spin_lock_init(&TsduMgr->Lock);
+ spin_lock_init(&TsduMgr->Lock);
}
LASSERT(child->kstc_type == kstt_child);
- cfs_spin_lock(&(child->kstc_lock));
+ spin_lock(&(child->kstc_lock));
LASSERT(parent->kstc_state == ksts_listening);
LASSERT(child->kstc_state == ksts_connecting);
FALSE
);
- cfs_spin_unlock(&(child->kstc_lock));
+ spin_unlock(&(child->kstc_lock));
KsPrint((2, "KsAcceptCompletionRoutine: singal parent: %p (child: %p)\n",
parent, child));
child->child.kstc_busy = FALSE;
child->kstc_state = ksts_associated;
- cfs_spin_unlock(&(child->kstc_lock));
+ spin_unlock(&(child->kstc_lock));
}
/* now free the Irp */
ks_addr_slot_t * slot = NULL;
PLIST_ENTRY list = NULL;
- cfs_spin_lock(&ks_data.ksnd_addrs_lock);
+ spin_lock(&ks_data.ksnd_addrs_lock);
list = ks_data.ksnd_addrs_list.Flink;
while (list != &ks_data.ksnd_addrs_list) {
slot = NULL;
}
- cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
+ spin_unlock(&ks_data.ksnd_addrs_lock);
- return slot;
+ return slot;
}
void
KsCleanupIpAddresses()
{
- cfs_spin_lock(&ks_data.ksnd_addrs_lock);
+ spin_lock(&ks_data.ksnd_addrs_lock);
while (!IsListEmpty(&ks_data.ksnd_addrs_list)) {
}
cfs_assert(ks_data.ksnd_naddrs == 0);
- cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
+ spin_unlock(&ks_data.ksnd_addrs_lock);
}
VOID
slot = cfs_alloc(sizeof(ks_addr_slot_t) + DeviceName->Length, CFS_ALLOC_ZERO);
if (slot != NULL) {
- cfs_spin_lock(&ks_data.ksnd_addrs_lock);
+ spin_lock(&ks_data.ksnd_addrs_lock);
InsertTailList(&ks_data.ksnd_addrs_list, &slot->link);
sprintf(slot->iface, "eth%d", ks_data.ksnd_naddrs++);
slot->ip_addr = ntohl(IpAddress->in_addr);
slot->devname.Length = DeviceName->Length;
slot->devname.MaximumLength = DeviceName->Length + sizeof(WCHAR);
slot->devname.Buffer = slot->buffer;
- cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
+ spin_unlock(&ks_data.ksnd_addrs_lock);
KsPrint((0, "KsAddAddressHandle: %s added: ip=%xh(%d.%d.%d.%d)\n",
slot->iface, IpAddress->in_addr,
/* initialize the global ks_data members */
RtlInitUnicodeString(&ks_data.ksnd_client_name, TDILND_MODULE_NAME);
- cfs_spin_lock_init(&ks_data.ksnd_addrs_lock);
+ spin_lock_init(&ks_data.ksnd_addrs_lock);
InitializeListHead(&ks_data.ksnd_addrs_list);
/* register the pnp handlers */
cfs_list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
child = cfs_list_entry (tmp, ks_tconn_t, child.kstc_link);
- cfs_spin_lock(&(child->kstc_lock));
+ spin_lock(&(child->kstc_lock));
if (!child->child.kstc_busy) {
LASSERT(child->kstc_state == ksts_associated);
child->child.kstc_busy = TRUE;
- cfs_spin_unlock(&(child->kstc_lock));
+ spin_unlock(&(child->kstc_lock));
break;
} else {
- cfs_spin_unlock(&(child->kstc_lock));
+ spin_unlock(&(child->kstc_lock));
child = NULL;
}
}
LASSERT(parent->kstc_type == kstt_listener);
- cfs_spin_lock(&(parent->kstc_lock));
+ spin_lock(&(parent->kstc_lock));
if (parent->kstc_state == ksts_listening) {
if (child) {
- cfs_spin_lock(&(child->kstc_lock));
+ spin_lock(&(child->kstc_lock));
child->child.kstc_info.ConnectionInfo = ConnectionInfo;
child->child.kstc_info.Remote = ConnectionInfo->RemoteAddress;
child->kstc_state = ksts_connecting;
- cfs_spin_unlock(&(child->kstc_lock));
+ spin_unlock(&(child->kstc_lock));
} else {
goto errorout;
}
- cfs_spin_unlock(&(parent->kstc_lock));
+ spin_unlock(&(parent->kstc_lock));
return Status;
errorout:
- cfs_spin_unlock(&(parent->kstc_lock));
+ spin_unlock(&(parent->kstc_lock));
*AcceptIrp = NULL;
*ConnectionContext = NULL;
KeSetEvent(&(WorkItem->Event), 0, FALSE);
- cfs_spin_lock(&(tconn->kstc_lock));
- cfs_clear_flag(tconn->kstc_flags, KS_TCONN_DISCONNECT_BUSY);
- cfs_spin_unlock(&(tconn->kstc_lock));
- ks_put_tconn(tconn);
+ spin_lock(&(tconn->kstc_lock));
+ cfs_clear_flag(tconn->kstc_flags, KS_TCONN_DISCONNECT_BUSY);
+ spin_unlock(&(tconn->kstc_lock));
+ ks_put_tconn(tconn);
}
tconn, DisconnectFlags));
ks_get_tconn(tconn);
- cfs_spin_lock(&(tconn->kstc_lock));
+ spin_lock(&(tconn->kstc_lock));
WorkItem = &(tconn->kstc_disconnect);
}
}
- cfs_spin_unlock(&(tconn->kstc_lock));
+ spin_unlock(&(tconn->kstc_lock));
ks_put_tconn(tconn);
return (Status);
tconn
);
- cfs_spin_lock_init(&(tconn->kstc_lock));
+ spin_lock_init(&(tconn->kstc_lock));
ks_get_tconn(tconn);
- cfs_spin_lock(&(ks_data.ksnd_tconn_lock));
+ spin_lock(&(ks_data.ksnd_tconn_lock));
/* attach it into global list in ks_data */
cfs_list_add(&(tconn->kstc_list), &(ks_data.ksnd_tconns));
ks_data.ksnd_ntconns++;
- cfs_spin_unlock(&(ks_data.ksnd_tconn_lock));
+ spin_unlock(&(ks_data.ksnd_tconn_lock));
tconn->kstc_rcv_wnd = tconn->kstc_snd_wnd = 0x10000;
}
{
LASSERT(cfs_atomic_read(&(tconn->kstc_refcount)) == 0);
- cfs_spin_lock(&(ks_data.ksnd_tconn_lock));
+ spin_lock(&(ks_data.ksnd_tconn_lock));
/* remove it from the global list */
cfs_list_del(&tconn->kstc_list);
if (ks_data.ksnd_ntconns == 0) {
cfs_wake_event(&ks_data.ksnd_tconn_exit);
}
- cfs_spin_unlock(&(ks_data.ksnd_tconn_lock));
+ spin_unlock(&(ks_data.ksnd_tconn_lock));
/* free the structure memory */
cfs_mem_cache_free(ks_data.ksnd_tconn_slab, tconn);
{
if (cfs_atomic_dec_and_test(&(tconn->kstc_refcount))) {
- cfs_spin_lock(&(tconn->kstc_lock));
+ spin_lock(&(tconn->kstc_lock));
if ( ( tconn->kstc_type == kstt_child ||
tconn->kstc_type == kstt_sender ) &&
( tconn->kstc_state == ksts_connected ) ) {
- cfs_spin_unlock(&(tconn->kstc_lock));
+ spin_unlock(&(tconn->kstc_lock));
ks_abort_tconn(tconn);
cfs_set_flag(tconn->kstc_flags, KS_TCONN_DESTROY_BUSY);
}
- cfs_spin_unlock(&(tconn->kstc_lock));
+ spin_unlock(&(tconn->kstc_lock));
}
}
}
tconn->kstc_addr.FileObject
);
- cfs_spin_lock(&tconn->child.kstc_parent->kstc_lock);
- cfs_spin_lock(&tconn->kstc_lock);
+ spin_lock(&tconn->child.kstc_parent->kstc_lock);
+ spin_lock(&tconn->kstc_lock);
tconn->kstc_state = ksts_inited;
tconn->child.kstc_queued = FALSE;
}
- cfs_spin_unlock(&tconn->kstc_lock);
- cfs_spin_unlock(&tconn->child.kstc_parent->kstc_lock);
+ spin_unlock(&tconn->kstc_lock);
+ spin_unlock(&tconn->child.kstc_parent->kstc_lock);
/* drop the reference of the parent tconn */
ks_put_tconn(tconn->child.kstc_parent);
NULL
);
- cfs_spin_lock(&(tconn->kstc_lock));
+ spin_lock(&(tconn->kstc_lock));
if (NT_SUCCESS(status)) {
tconn->sender.kstc_info.ConnectionInfo = ConnectionInfo;
tconn->sender.kstc_info.Remote = ConnectionInfo->RemoteAddress;
- cfs_spin_unlock(&(tconn->kstc_lock));
+ spin_unlock(&(tconn->kstc_lock));
} else {
rc = cfs_error_code(status);
tconn->kstc_state = ksts_associated;
- cfs_spin_unlock(&(tconn->kstc_lock));
+ spin_unlock(&(tconn->kstc_lock));
/* disassocidate the connection and the address object,
after cleanup, it's safe to set the state to abort ... */
cfs_enter_debugger();
}
- cfs_spin_lock(&(tconn->kstc_lock));
+ spin_lock(&(tconn->kstc_lock));
/* cleanup the tsdumgr Lists */
KsCleanupTsdu (tconn);
info->ConnectionInfo = NULL;
info->Remote = NULL;
- cfs_spin_unlock(&(tconn->kstc_lock));
+ spin_unlock(&(tconn->kstc_lock));
}
status = STATUS_SUCCESS;
WorkItem = &(tconn->kstc_disconnect);
ks_get_tconn(tconn);
- cfs_spin_lock(&(tconn->kstc_lock));
+ spin_lock(&(tconn->kstc_lock));
if (tconn->kstc_state != ksts_connected) {
ks_put_tconn(tconn);
}
}
- cfs_spin_unlock(&(tconn->kstc_lock));
+ spin_unlock(&(tconn->kstc_lock));
}
engs = &TsduMgr->Slot;
if (!engs->queued) {
- cfs_spin_lock(&engm->lock);
+ spin_lock(&engm->lock);
if (!engs->queued) {
cfs_list_add_tail(&engs->link, &engm->list);
engs->queued = TRUE;
engs->tsdumgr = TsduMgr;
KeSetEvent(&(engm->start),0, FALSE);
}
- cfs_spin_unlock(&engm->lock);
+ spin_unlock(&engm->lock);
KsPrint((4, "KsQueueTdiEngine: TsduMgr=%p is queued to engine %p\n",
TsduMgr, engm));
}
if (engs->queued) {
engm = engs->emgr;
LASSERT(engm != NULL);
- cfs_spin_lock(&engm->lock);
+ spin_lock(&engm->lock);
if (engs->queued) {
cfs_list_del(&engs->link);
engs->queued = FALSE;
engs->emgr = NULL;
engs->tsdumgr = NULL;
}
- cfs_spin_unlock(&engm->lock);
+ spin_unlock(&engm->lock);
KsPrint((4, "KsQueueTdiEngine: TsduMgr %p is removed from engine %p\n",
TsduMgr, engm));
}
cfs_wait_event_internal(&engm->start, 0);
- cfs_spin_lock(&engm->lock);
+ spin_lock(&engm->lock);
if (cfs_list_empty(&engm->list)) {
- cfs_spin_unlock(&engm->lock);
+ spin_unlock(&engm->lock);
continue;
}
LASSERT(engs->queued);
engs->emgr = NULL;
engs->queued = FALSE;
- cfs_spin_unlock(&engm->lock);
+ spin_unlock(&engm->lock);
tconn = engs->tconn;
LASSERT(tconn->kstc_magic == KS_TCONN_MAGIC);
/* initialize tconn related globals */
RtlZeroMemory(&ks_data, sizeof(ks_tdi_data_t));
- cfs_spin_lock_init(&ks_data.ksnd_tconn_lock);
+ spin_lock_init(&ks_data.ksnd_tconn_lock);
CFS_INIT_LIST_HEAD(&ks_data.ksnd_tconns);
cfs_init_event(&ks_data.ksnd_tconn_exit, TRUE, FALSE);
}
/* initialize tsdu related globals */
- cfs_spin_lock_init(&ks_data.ksnd_tsdu_lock);
+ spin_lock_init(&ks_data.ksnd_tsdu_lock);
CFS_INIT_LIST_HEAD(&ks_data.ksnd_freetsdus);
ks_data.ksnd_tsdu_size = TDINAL_TSDU_DEFAULT_SIZE; /* 64k */
ks_data.ksnd_tsdu_slab = cfs_mem_cache_create(
goto errorout;
}
for (i = 0; i < ks_data.ksnd_engine_nums; i++) {
- cfs_spin_lock_init(&ks_data.ksnd_engine_mgr[i].lock);
+ spin_lock_init(&ks_data.ksnd_engine_mgr[i].lock);
cfs_init_event(&ks_data.ksnd_engine_mgr[i].start, TRUE, FALSE);
cfs_init_event(&ks_data.ksnd_engine_mgr[i].exit, TRUE, FALSE);
CFS_INIT_LIST_HEAD(&ks_data.ksnd_engine_mgr[i].list);
}
/* we need wait until all the tconn are freed */
- cfs_spin_lock(&(ks_data.ksnd_tconn_lock));
+ spin_lock(&(ks_data.ksnd_tconn_lock));
if (cfs_list_empty(&(ks_data.ksnd_tconns))) {
cfs_wake_event(&ks_data.ksnd_tconn_exit);
}
- cfs_spin_unlock(&(ks_data.ksnd_tconn_lock));
+ spin_unlock(&(ks_data.ksnd_tconn_lock));
/* now wait on the tconn exit event */
cfs_wait_event_internal(&ks_data.ksnd_tconn_exit, 0);
ks_data.ksnd_tconn_slab = NULL;
/* clean up all the tsud buffers in the free list */
- cfs_spin_lock(&(ks_data.ksnd_tsdu_lock));
+ spin_lock(&(ks_data.ksnd_tsdu_lock));
cfs_list_for_each (list, &ks_data.ksnd_freetsdus) {
KsTsdu = cfs_list_entry (list, KS_TSDU, Link);
ks_data.ksnd_tsdu_slab,
KsTsdu );
}
- cfs_spin_unlock(&(ks_data.ksnd_tsdu_lock));
+ spin_unlock(&(ks_data.ksnd_tsdu_lock));
/* it's safe to delete the tsdu slab ... */
cfs_mem_cache_destroy(ks_data.ksnd_tsdu_slab);
/* create the backlog child tconn */
backlog = ks_create_child_tconn(parent);
- cfs_spin_lock(&(parent->kstc_lock));
+ spin_lock(&(parent->kstc_lock));
if (backlog) {
- cfs_spin_lock(&backlog->kstc_lock);
+ spin_lock(&backlog->kstc_lock);
/* attch it into the listing list of daemon */
cfs_list_add( &backlog->child.kstc_link,
&parent->listener.kstc_listening.list );
parent->listener.kstc_listening.num++;
backlog->child.kstc_queued = TRUE;
- cfs_spin_unlock(&backlog->kstc_lock);
+ spin_unlock(&backlog->kstc_lock);
} else {
cfs_enter_debugger();
}
- cfs_spin_unlock(&(parent->kstc_lock));
+ spin_unlock(&(parent->kstc_lock));
}
}
return rc;
}
- cfs_spin_lock(&(tconn->kstc_lock));
- tconn->listener.nbacklog = nbacklog;
- tconn->kstc_state = ksts_listening;
- cfs_set_flag(tconn->kstc_flags, KS_TCONN_DAEMON_STARTED);
- cfs_spin_unlock(&(tconn->kstc_lock));
+ spin_lock(&(tconn->kstc_lock));
+ tconn->listener.nbacklog = nbacklog;
+ tconn->kstc_state = ksts_listening;
+ cfs_set_flag(tconn->kstc_flags, KS_TCONN_DAEMON_STARTED);
+ spin_unlock(&(tconn->kstc_lock));
- return rc;
+ return rc;
}
void
/* reset all tdi event callbacks to NULL */
KsResetHandlers (tconn);
- cfs_spin_lock(&tconn->kstc_lock);
+ spin_lock(&tconn->kstc_lock);
cfs_clear_flag(tconn->kstc_flags, KS_TCONN_DAEMON_STARTED);
ks_put_tconn(backlog);
}
- cfs_spin_unlock(&tconn->kstc_lock);
+ spin_unlock(&tconn->kstc_lock);
/* wake up it from the waiting on new incoming connections */
KeSetEvent(&tconn->listener.kstc_accept_event, 0, FALSE);
ks_replenish_backlogs(parent, parent->listener.nbacklog);
- cfs_spin_lock(&(parent->kstc_lock));
+ spin_lock(&(parent->kstc_lock));
- if (parent->listener.kstc_listening.num <= 0) {
- cfs_spin_unlock(&(parent->kstc_lock));
+ if (parent->listener.kstc_listening.num <= 0) {
+ spin_unlock(&(parent->kstc_lock));
return -1;
}
cfs_list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
backlog = cfs_list_entry (tmp, ks_tconn_t, child.kstc_link);
- cfs_spin_lock(&(backlog->kstc_lock));
+ spin_lock(&(backlog->kstc_lock));
if (backlog->child.kstc_accepted) {
parent->listener.kstc_listening.num--;
backlog->child.kstc_queueno = 1;
- cfs_spin_unlock(&(backlog->kstc_lock));
+ spin_unlock(&(backlog->kstc_lock));
break;
} else {
- cfs_spin_unlock(&(backlog->kstc_lock));
+ spin_unlock(&(backlog->kstc_lock));
backlog = NULL;
}
}
- cfs_spin_unlock(&(parent->kstc_lock));
+ spin_unlock(&(parent->kstc_lock));
/* we need wait until new incoming connections are requested
or the case of shuting down the listenig daemon thread */
NULL
);
- cfs_spin_lock(&(parent->kstc_lock));
+ spin_lock(&(parent->kstc_lock));
/* check whether it's exptected to exit ? */
if (!cfs_is_flag_set(parent->kstc_flags, KS_TCONN_DAEMON_STARTED)) {
- cfs_spin_unlock(&(parent->kstc_lock));
+ spin_unlock(&(parent->kstc_lock));
} else {
goto again;
}
ks_addr_slot_t * slot = NULL;
PLIST_ENTRY list = NULL;
- cfs_spin_lock(&ks_data.ksnd_addrs_lock);
+ spin_lock(&ks_data.ksnd_addrs_lock);
list = ks_data.ksnd_addrs_list.Flink;
while (list != &ks_data.ksnd_addrs_list) {
slot = NULL;
}
- cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
+ spin_unlock(&ks_data.ksnd_addrs_lock);
return (int)(slot == NULL);
}
PLIST_ENTRY list = NULL;
int nips = 0;
- cfs_spin_lock(&ks_data.ksnd_addrs_lock);
+ spin_lock(&ks_data.ksnd_addrs_lock);
*names = cfs_alloc(sizeof(char *) * ks_data.ksnd_naddrs, CFS_ALLOC_ZERO);
if (*names == NULL) {
errorout:
- cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
+ spin_unlock(&ks_data.ksnd_addrs_lock);
return nips;
}
{
LASSERT(sock->kstc_type == kstt_listener);
- cfs_spin_lock(&(sock->kstc_lock));
+ spin_lock(&(sock->kstc_lock));
/* clear the daemon flag */
cfs_clear_flag(sock->kstc_flags, KS_TCONN_DAEMON_STARTED);
/* wake up it from the waiting on new incoming connections */
KeSetEvent(&sock->listener.kstc_accept_event, 0, FALSE);
- cfs_spin_unlock(&(sock->kstc_lock));
+ spin_unlock(&(sock->kstc_lock));
}
/*
{
PTRANSPORT_ADDRESS taddr = NULL;
- cfs_spin_lock(&socket->kstc_lock);
+ spin_lock(&socket->kstc_lock);
if (remote) {
if (socket->kstc_type == kstt_sender) {
taddr = socket->sender.kstc_info.Remote;
if (port != NULL)
*port = ntohs (addr->sin_port);
} else {
- cfs_spin_unlock(&socket->kstc_lock);
- return -ENOTCONN;
- }
+ spin_unlock(&socket->kstc_lock);
+ return -ENOTCONN;
+ }
- cfs_spin_unlock(&socket->kstc_lock);
- return 0;
+ spin_unlock(&socket->kstc_lock);
+ return 0;
}
int libcfs_sock_write(struct socket *sock, void *buffer, int nob, int timeout)
char *cfs_trace_console_buffers[CFS_NR_CPUS][CFS_TCD_TYPE_MAX];
-cfs_rw_semaphore_t cfs_tracefile_sem;
+struct rw_semaphore cfs_tracefile_sem;
int cfs_tracefile_init_arch()
{
int j;
struct cfs_trace_cpu_data *tcd;
- cfs_init_rwsem(&cfs_tracefile_sem);
+ init_rwsem(&cfs_tracefile_sem);
/* initialize trace_data */
memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
cfs_trace_data[i] = NULL;
}
- cfs_fini_rwsem(&cfs_tracefile_sem);
+ fini_rwsem(&cfs_tracefile_sem);
}
void cfs_tracefile_read_lock()
{
- cfs_down_read(&cfs_tracefile_sem);
+ down_read(&cfs_tracefile_sem);
}
void cfs_tracefile_read_unlock()
{
- cfs_up_read(&cfs_tracefile_sem);
+ up_read(&cfs_tracefile_sem);
}
void cfs_tracefile_write_lock()
{
- cfs_down_write(&cfs_tracefile_sem);
+ down_write(&cfs_tracefile_sem);
}
void cfs_tracefile_write_unlock()
{
- cfs_up_write(&cfs_tracefile_sem);
+ up_write(&cfs_tracefile_sem);
}
cfs_trace_buf_type_t cfs_trace_buf_idx_get()
cfs_list_t ws_list; /* chain on global list */
#ifdef __KERNEL__
/** serialised workitems */
- cfs_spinlock_t ws_lock;
+ spinlock_t ws_lock;
/** where schedulers sleep */
cfs_waitq_t ws_waitq;
#endif
struct cfs_workitem_data {
/** serialize */
- cfs_spinlock_t wi_glock;
+ spinlock_t wi_glock;
/** list of all schedulers */
cfs_list_t wi_scheds;
/** WI module is initialized */
static inline void
cfs_wi_sched_lock(cfs_wi_sched_t *sched)
{
- cfs_spin_lock(&sched->ws_lock);
+ spin_lock(&sched->ws_lock);
}
static inline void
cfs_wi_sched_unlock(cfs_wi_sched_t *sched)
{
- cfs_spin_unlock(&sched->ws_lock);
+ spin_unlock(&sched->ws_lock);
}
static inline int
static inline void
cfs_wi_sched_lock(cfs_wi_sched_t *sched)
{
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
}
static inline void
cfs_wi_sched_unlock(cfs_wi_sched_t *sched)
{
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
}
#endif /* __KERNEL__ */
if (sched->ws_cptab != NULL)
cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
LASSERT(sched->ws_starting == 1);
sched->ws_starting--;
sched->ws_nthreads++;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
cfs_wi_sched_lock(sched);
cfs_wi_sched_unlock(sched);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
sched->ws_nthreads--;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
- return 0;
+ return 0;
}
#else /* __KERNEL__ */
int
cfs_wi_check_events (void)
{
- int n = 0;
- cfs_workitem_t *wi;
+ int n = 0;
+ cfs_workitem_t *wi;
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
- for (;;) {
+ for (;;) {
struct cfs_wi_sched *sched = NULL;
struct cfs_wi_sched *tmp;
LASSERT(sched->ws_nscheduled > 0);
sched->ws_nscheduled--;
- LASSERT (wi->wi_scheduled);
- wi->wi_scheduled = 0;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ LASSERT(wi->wi_scheduled);
+ wi->wi_scheduled = 0;
+ spin_unlock(&cfs_wi_data.wi_glock);
- n++;
- (*wi->wi_action) (wi);
+ n++;
+ (*wi->wi_action) (wi);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
- }
+ spin_lock(&cfs_wi_data.wi_glock);
+ }
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
- return n;
+ spin_unlock(&cfs_wi_data.wi_glock);
+ return n;
}
#endif
LASSERT(cfs_wi_data.wi_init);
LASSERT(!cfs_wi_data.wi_stopping);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
if (sched->ws_stopping) {
CDEBUG(D_INFO, "%s is in progress of stopping\n",
sched->ws_name);
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
return;
}
LASSERT(!cfs_list_empty(&sched->ws_list));
sched->ws_stopping = 1;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
i = 2;
#ifdef __KERNEL__
cfs_waitq_broadcast(&sched->ws_waitq);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
while (sched->ws_nthreads > 0) {
CDEBUG(IS_PO2(++i) ? D_WARNING : D_NET,
"waiting for %d threads of WI sched[%s] to terminate\n",
sched->ws_nthreads, sched->ws_name);
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
cfs_pause(cfs_time_seconds(1) / 20);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
}
cfs_list_del(&sched->ws_list);
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
#else
SET_BUT_UNUSED(i);
#endif
sched->ws_cpt = cpt;
#ifdef __KERNEL__
- cfs_spin_lock_init(&sched->ws_lock);
+ spin_lock_init(&sched->ws_lock);
cfs_waitq_init(&sched->ws_waitq);
#endif
CFS_INIT_LIST_HEAD(&sched->ws_runq);
rc = 0;
#ifdef __KERNEL__
while (nthrs > 0) {
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
while (sched->ws_starting > 0) {
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
cfs_schedule();
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
}
sched->ws_starting++;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
rc = cfs_create_thread(cfs_wi_scheduler, sched, 0);
if (rc >= 0) {
CERROR("Failed to create thread for WI scheduler %s: %d\n",
name, rc);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
/* make up for cfs_wi_sched_destroy */
cfs_list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
sched->ws_starting--;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
cfs_wi_sched_destroy(sched);
return rc;
#else
SET_BUT_UNUSED(rc);
#endif
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
cfs_list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
*sched_pp = sched;
return 0;
{
memset(&cfs_wi_data, 0, sizeof(cfs_wi_data));
- cfs_spin_lock_init(&cfs_wi_data.wi_glock);
+ spin_lock_init(&cfs_wi_data.wi_glock);
CFS_INIT_LIST_HEAD(&cfs_wi_data.wi_scheds);
cfs_wi_data.wi_init = 1;
{
struct cfs_wi_sched *sched;
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
cfs_wi_data.wi_stopping = 1;
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
#ifdef __KERNEL__
/* nobody should contend on this list */
}
cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
while (sched->ws_nthreads != 0) {
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
cfs_pause(cfs_time_seconds(1) / 20);
- cfs_spin_lock(&cfs_wi_data.wi_glock);
+ spin_lock(&cfs_wi_data.wi_glock);
}
- cfs_spin_unlock(&cfs_wi_data.wi_glock);
+ spin_unlock(&cfs_wi_data.wi_glock);
}
#endif
while (!cfs_list_empty(&cfs_wi_data.wi_scheds)) {
#ifdef __KERNEL__
-#define lnet_ptl_lock(ptl) cfs_spin_lock(&(ptl)->ptl_lock)
-#define lnet_ptl_unlock(ptl) cfs_spin_unlock(&(ptl)->ptl_lock)
-#define lnet_eq_wait_lock() cfs_spin_lock(&the_lnet.ln_eq_wait_lock)
-#define lnet_eq_wait_unlock() cfs_spin_unlock(&the_lnet.ln_eq_wait_lock)
-#define lnet_ni_lock(ni) cfs_spin_lock(&(ni)->ni_lock)
-#define lnet_ni_unlock(ni) cfs_spin_unlock(&(ni)->ni_lock)
-#define LNET_MUTEX_LOCK(m) cfs_mutex_lock(m)
-#define LNET_MUTEX_UNLOCK(m) cfs_mutex_unlock(m)
+#define lnet_ptl_lock(ptl) spin_lock(&(ptl)->ptl_lock)
+#define lnet_ptl_unlock(ptl) spin_unlock(&(ptl)->ptl_lock)
+#define lnet_eq_wait_lock() spin_lock(&the_lnet.ln_eq_wait_lock)
+#define lnet_eq_wait_unlock() spin_unlock(&the_lnet.ln_eq_wait_lock)
+#define lnet_ni_lock(ni) spin_lock(&(ni)->ni_lock)
+#define lnet_ni_unlock(ni) spin_unlock(&(ni)->ni_lock)
+#define LNET_MUTEX_LOCK(m) mutex_lock(m)
+#define LNET_MUTEX_UNLOCK(m) mutex_unlock(m)
#else /* !__KERNEL__ */
typedef struct lnet_ni {
#ifdef __KERNEL__
- cfs_spinlock_t ni_lock;
+ spinlock_t ni_lock;
#else
# ifndef HAVE_LIBPTHREAD
int ni_lock;
typedef struct lnet_portal {
#ifdef __KERNEL__
- cfs_spinlock_t ptl_lock;
+ spinlock_t ptl_lock;
#else
# ifndef HAVE_LIBPTHREAD
int ptl_lock;
struct lnet_res_container ln_eq_container;
#ifdef __KERNEL__
cfs_waitq_t ln_eq_waitq;
- cfs_spinlock_t ln_eq_wait_lock;
+ spinlock_t ln_eq_wait_lock;
#else
# ifndef HAVE_LIBPTHREAD
int ln_eq_wait_lock;
cfs_list_t ln_rcd_zombie;
#ifdef __KERNEL__
/* serialise startup/shutdown */
- cfs_semaphore_t ln_rc_signal;
+ struct semaphore ln_rc_signal;
- cfs_mutex_t ln_api_mutex;
- cfs_mutex_t ln_lnd_mutex;
+ struct mutex ln_api_mutex;
+ struct mutex ln_lnd_mutex;
#else
# ifndef HAVE_LIBPTHREAD
int ln_api_mutex;
for (i = 0; i < npages; i++) {
if (p->mxg_pages[i] != NULL) {
__free_page(p->mxg_pages[i]);
- cfs_spin_lock(&kmxlnd_data.kmx_mem_lock);
- kmxlnd_data.kmx_mem_used -= PAGE_SIZE;
- cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock);
+ spin_lock(&kmxlnd_data.kmx_mem_lock);
+ kmxlnd_data.kmx_mem_used -= PAGE_SIZE;
+ spin_unlock(&kmxlnd_data.kmx_mem_lock);
}
}
mxlnd_free_pages(p);
return -ENOMEM;
}
- cfs_spin_lock(&kmxlnd_data.kmx_mem_lock);
- kmxlnd_data.kmx_mem_used += PAGE_SIZE;
- cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock);
+ spin_lock(&kmxlnd_data.kmx_mem_lock);
+ kmxlnd_data.kmx_mem_used += PAGE_SIZE;
+ spin_unlock(&kmxlnd_data.kmx_mem_lock);
}
*pp = p;
int i = (int) ((long) arg);
cfs_atomic_inc(&kmxlnd_data.kmx_nthreads);
- cfs_init_completion(&kmxlnd_data.kmx_completions[i]);
+ init_completion(&kmxlnd_data.kmx_completions[i]);
pid = cfs_create_thread(fn, arg, 0);
if (pid < 0) {
{
int i = (int) id;
cfs_atomic_dec (&kmxlnd_data.kmx_nthreads);
- cfs_complete(&kmxlnd_data.kmx_completions[i]);
+ complete(&kmxlnd_data.kmx_completions[i]);
}
/**
/* wakeup request_waitds */
mx_wakeup(kmxlnd_data.kmx_endpt);
- cfs_up(&kmxlnd_data.kmx_tx_queue_sem);
- cfs_up(&kmxlnd_data.kmx_conn_sem);
+ up(&kmxlnd_data.kmx_tx_queue_sem);
+ up(&kmxlnd_data.kmx_conn_sem);
mxlnd_sleep(2 * CFS_HZ);
/* fall through */
CDEBUG(D_NET, "waiting on threads\n");
/* wait for threads to complete */
for (i = 0; i < nthreads; i++) {
- cfs_wait_for_completion(&kmxlnd_data.kmx_completions[i]);
+ wait_for_completion(&kmxlnd_data.kmx_completions[i]);
}
LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
CDEBUG(D_NET, "freeing completions\n");
MXLND_FREE(kmxlnd_data.kmx_completions,
- nthreads * sizeof(cfs_completion_t));
+ nthreads * sizeof(struct completion));
/* fall through */
kmxlnd_data.kmx_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
CDEBUG(D_NET, "my incarnation is %llu\n", kmxlnd_data.kmx_incarnation);
- cfs_rwlock_init (&kmxlnd_data.kmx_global_lock);
- cfs_spin_lock_init (&kmxlnd_data.kmx_mem_lock);
+ rwlock_init (&kmxlnd_data.kmx_global_lock);
+ spin_lock_init (&kmxlnd_data.kmx_mem_lock);
CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_conn_reqs);
CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_conn_zombies);
CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_orphan_msgs);
- cfs_spin_lock_init (&kmxlnd_data.kmx_conn_lock);
- cfs_sema_init(&kmxlnd_data.kmx_conn_sem, 0);
+ spin_lock_init (&kmxlnd_data.kmx_conn_lock);
+ sema_init(&kmxlnd_data.kmx_conn_sem, 0);
for (i = 0; i < MXLND_HASH_SIZE; i++) {
CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_peers[i]);
}
CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_tx_idle);
- cfs_spin_lock_init (&kmxlnd_data.kmx_tx_idle_lock);
- kmxlnd_data.kmx_tx_next_cookie = 1;
- CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_tx_queue);
- cfs_spin_lock_init (&kmxlnd_data.kmx_tx_queue_lock);
- cfs_sema_init(&kmxlnd_data.kmx_tx_queue_sem, 0);
+ spin_lock_init (&kmxlnd_data.kmx_tx_idle_lock);
+ kmxlnd_data.kmx_tx_next_cookie = 1;
+ CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_tx_queue);
+ spin_lock_init (&kmxlnd_data.kmx_tx_queue_lock);
+ sema_init(&kmxlnd_data.kmx_tx_queue_sem, 0);
kmxlnd_data.kmx_init = MXLND_INIT_DATA;
/*****************************************************/
/* start threads */
MXLND_ALLOC(kmxlnd_data.kmx_completions,
- nthreads * sizeof(cfs_completion_t));
+ nthreads * sizeof(struct completion));
if (kmxlnd_data.kmx_completions == NULL) {
CERROR("failed to alloc kmxlnd_data.kmx_completions\n");
goto failed;
}
memset(kmxlnd_data.kmx_completions, 0,
- nthreads * sizeof(cfs_completion_t));
+ nthreads * sizeof(struct completion));
CDEBUG(D_NET, "using %d %s in mx_wait_any()\n",
*kmxlnd_tunables.kmx_n_waitd,
cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
mx_wakeup(kmxlnd_data.kmx_endpt);
for (--i; i >= 0; i--) {
- cfs_wait_for_completion(&kmxlnd_data.kmx_completions[i]);
+ wait_for_completion(&kmxlnd_data.kmx_completions[i]);
}
LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
MXLND_FREE(kmxlnd_data.kmx_completions,
- nthreads * sizeof(cfs_completion_t));
+ nthreads * sizeof(struct completion));
goto failed;
}
cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
mx_wakeup(kmxlnd_data.kmx_endpt);
for (--i; i >= 0; i--) {
- cfs_wait_for_completion(&kmxlnd_data.kmx_completions[i]);
+ wait_for_completion(&kmxlnd_data.kmx_completions[i]);
}
LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
MXLND_FREE(kmxlnd_data.kmx_completions,
- nthreads * sizeof(cfs_completion_t));
+ nthreads * sizeof(struct completion));
goto failed;
}
ret = mxlnd_thread_start(mxlnd_timeoutd, (void*)((long)i++));
CERROR("Starting mxlnd_timeoutd failed with %d\n", ret);
cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
mx_wakeup(kmxlnd_data.kmx_endpt);
- cfs_up(&kmxlnd_data.kmx_tx_queue_sem);
+ up(&kmxlnd_data.kmx_tx_queue_sem);
for (--i; i >= 0; i--) {
- cfs_wait_for_completion(&kmxlnd_data.kmx_completions[i]);
+ wait_for_completion(&kmxlnd_data.kmx_completions[i]);
}
LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
MXLND_FREE(kmxlnd_data.kmx_completions,
- nthreads * sizeof(cfs_completion_t));
+ nthreads * sizeof(struct completion));
goto failed;
}
ret = mxlnd_thread_start(mxlnd_connd, (void*)((long)i++));
CERROR("Starting mxlnd_connd failed with %d\n", ret);
cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
mx_wakeup(kmxlnd_data.kmx_endpt);
- cfs_up(&kmxlnd_data.kmx_tx_queue_sem);
+ up(&kmxlnd_data.kmx_tx_queue_sem);
for (--i; i >= 0; i--) {
- cfs_wait_for_completion(&kmxlnd_data.kmx_completions[i]);
+ wait_for_completion(&kmxlnd_data.kmx_completions[i]);
}
LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
MXLND_FREE(kmxlnd_data.kmx_completions,
- nthreads * sizeof(cfs_completion_t));
+ nthreads * sizeof(struct completion));
goto failed;
}
/* provide wrappers around LIBCFS_ALLOC/FREE to keep MXLND specific
* memory usage stats that include pages */
-#define MXLND_ALLOC(x, size) \
- do { \
- cfs_spin_lock(&kmxlnd_data.kmx_mem_lock); \
- kmxlnd_data.kmx_mem_used += size; \
- cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock); \
- LIBCFS_ALLOC(x, size); \
- if (unlikely(x == NULL)) { \
- cfs_spin_lock(&kmxlnd_data.kmx_mem_lock); \
- kmxlnd_data.kmx_mem_used -= size; \
- cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock); \
- } \
- } while (0)
-
-#define MXLND_FREE(x, size) \
- do { \
- cfs_spin_lock(&kmxlnd_data.kmx_mem_lock); \
- kmxlnd_data.kmx_mem_used -= size; \
- cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock); \
- LIBCFS_FREE(x, size); \
- } while (0)
+#define MXLND_ALLOC(x, size) \
+ do { \
+ spin_lock(&kmxlnd_data.kmx_mem_lock); \
+ kmxlnd_data.kmx_mem_used += size; \
+ spin_unlock(&kmxlnd_data.kmx_mem_lock); \
+ LIBCFS_ALLOC(x, size); \
+ if (unlikely(x == NULL)) { \
+ spin_lock(&kmxlnd_data.kmx_mem_lock); \
+ kmxlnd_data.kmx_mem_used -= size; \
+ spin_unlock(&kmxlnd_data.kmx_mem_lock); \
+ } \
+ } while (0)
+
+#define MXLND_FREE(x, size) \
+ do { \
+ spin_lock(&kmxlnd_data.kmx_mem_lock); \
+ kmxlnd_data.kmx_mem_used -= size; \
+ spin_unlock(&kmxlnd_data.kmx_mem_lock); \
+ LIBCFS_FREE(x, size); \
+ } while (0)
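/*
 * Illustrative sketch only, not part of the patch: typical use of the
 * wrappers above.  Routing every allocation and free through MXLND_ALLOC
 * and MXLND_FREE keeps kmx_mem_used an accurate count of the LND's memory
 * footprint, updated under kmx_mem_lock.  The function name is
 * hypothetical; kmx_conn_t is the connection type used elsewhere here.
 */
static int mxlnd_alloc_conn_sketch(kmx_conn_t **connp)
{
	kmx_conn_t *conn;

	MXLND_ALLOC(conn, sizeof(*conn));
	if (conn == NULL)
		return -ENOMEM;
	memset(conn, 0, sizeof(*conn));
	/* ... initialize and publish conn; release with
	 * MXLND_FREE(conn, sizeof(*conn)) ... */
	*connp = conn;
	return 0;
}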
typedef struct kmx_tunables
int kmx_init; /* initialization state */
cfs_atomic_t kmx_shutdown; /* shutting down? */
cfs_atomic_t kmx_nthreads; /* number of threads */
- cfs_completion_t *kmx_completions; /* array of completion structs */
- lnet_ni_t *kmx_ni; /* the LND instance */
- u64 kmx_incarnation; /* my incarnation value */
- long kmx_mem_used; /* memory used */
- mx_endpoint_t kmx_endpt; /* the MX endpoint */
- mx_endpoint_addr_t kmx_epa; /* the MX endpoint address */
-
- cfs_rwlock_t kmx_global_lock; /* global lock */
- cfs_spinlock_t kmx_mem_lock; /* memory accounting lock */
-
- cfs_list_t kmx_conn_reqs; /* list of connection requests */
- cfs_spinlock_t kmx_conn_lock; /* connection list lock */
- cfs_semaphore_t kmx_conn_sem; /* semaphore for connection request list */
+ struct completion *kmx_completions; /* array of completion structs */
+ lnet_ni_t *kmx_ni; /* the LND instance */
+ u64 kmx_incarnation; /* my incarnation value */
+ long kmx_mem_used; /* memory used */
+ mx_endpoint_t kmx_endpt; /* the MX endpoint */
+ mx_endpoint_addr_t kmx_epa; /* the MX endpoint address */
+
+ rwlock_t kmx_global_lock; /* global lock */
+ spinlock_t kmx_mem_lock; /* memory accounting lock */
+
+ cfs_list_t kmx_conn_reqs; /* list of connection reqs */
+ spinlock_t kmx_conn_lock; /* connection list lock */
+ struct semaphore kmx_conn_sem; /* connection request list */
cfs_list_t kmx_conn_zombies; /* list of zombie connections */
cfs_list_t kmx_orphan_msgs; /* list of txs to cancel */
struct kmx_ctx *kmx_txs; /* all tx descriptors */
cfs_list_t kmx_tx_idle; /* list of idle tx */
- cfs_spinlock_t kmx_tx_idle_lock; /* lock for idle tx list */
- s32 kmx_tx_used; /* txs in use */
- u64 kmx_tx_next_cookie; /* unique id for tx */
- cfs_list_t kmx_tx_queue; /* generic send queue */
- cfs_spinlock_t kmx_tx_queue_lock; /* lock for generic sends */
- cfs_semaphore_t kmx_tx_queue_sem; /* semaphore for tx queue */
+ spinlock_t kmx_tx_idle_lock; /* lock for idle tx list */
+ s32 kmx_tx_used; /* txs in use */
+ u64 kmx_tx_next_cookie; /* unique id for tx */
+ cfs_list_t kmx_tx_queue; /* generic send queue */
+ spinlock_t kmx_tx_queue_lock; /* lock for generic sends */
+ struct semaphore kmx_tx_queue_sem; /* semaphore for tx queue */
} kmx_data_t;
#define MXLND_INIT_NOTHING 0 /* in the beginning, there was nothing... */
mx_endpoint_addr_t mxk_epa; /* peer's endpoint address */
- cfs_spinlock_t mxk_lock; /* lock */
+ spinlock_t mxk_lock; /* lock */
unsigned long mxk_timeout; /* expiration of oldest pending tx/rx */
unsigned long mxk_last_tx; /* when last tx completed with success */
unsigned long mxk_last_rx; /* when last rx completed */
} while (0)
-#define mxlnd_conn_decref(conn) \
-do { \
- LASSERT(conn != NULL); \
- LASSERT(cfs_atomic_read(&(conn)->mxk_refcount) > 0); \
- if (cfs_atomic_dec_and_test(&(conn)->mxk_refcount)) { \
- cfs_spin_lock(&kmxlnd_data.kmx_conn_lock); \
- LASSERT((conn)->mxk_status == MXLND_CONN_DISCONNECT); \
- CDEBUG(D_NET, "adding conn %p to zombies\n", (conn)); \
- cfs_list_add_tail(&(conn)->mxk_zombie, \
- &kmxlnd_data.kmx_conn_zombies); \
- cfs_spin_unlock(&kmxlnd_data.kmx_conn_lock); \
- cfs_up(&kmxlnd_data.kmx_conn_sem); \
- } \
+#define mxlnd_conn_decref(conn) \
+do { \
+ LASSERT(conn != NULL); \
+ LASSERT(cfs_atomic_read(&(conn)->mxk_refcount) > 0); \
+ if (cfs_atomic_dec_and_test(&(conn)->mxk_refcount)) { \
+ spin_lock(&kmxlnd_data.kmx_conn_lock); \
+ LASSERT((conn)->mxk_status == MXLND_CONN_DISCONNECT); \
+ CDEBUG(D_NET, "adding conn %p to zombies\n", (conn)); \
+ cfs_list_add_tail(&(conn)->mxk_zombie, \
+ &kmxlnd_data.kmx_conn_zombies); \
+ spin_unlock(&kmxlnd_data.kmx_conn_lock); \
+ up(&kmxlnd_data.kmx_conn_sem); \
+ } \
} while (0)
#define mxlnd_valid_msg_type(type) \
rxs = &conn->mxk_rx_idle;
- cfs_spin_lock(&conn->mxk_lock);
+ spin_lock(&conn->mxk_lock);
- if (cfs_list_empty (rxs)) {
- cfs_spin_unlock(&conn->mxk_lock);
- return NULL;
- }
+ if (cfs_list_empty (rxs)) {
+ spin_unlock(&conn->mxk_lock);
+ return NULL;
+ }
- rx = cfs_list_entry (rxs->next, kmx_ctx_t, mxc_list);
- cfs_list_del_init(&rx->mxc_list);
- cfs_spin_unlock(&conn->mxk_lock);
+ rx = cfs_list_entry (rxs->next, kmx_ctx_t, mxc_list);
+ cfs_list_del_init(&rx->mxc_list);
+ spin_unlock(&conn->mxk_lock);
#if MXLND_DEBUG
if (rx->mxc_get != rx->mxc_put) {
rx->mxc_put++;
LASSERT(rx->mxc_get == rx->mxc_put);
- cfs_spin_lock(&conn->mxk_lock);
- cfs_list_add(&rx->mxc_list, rxs);
- cfs_spin_unlock(&conn->mxk_lock);
- return 0;
+ spin_lock(&conn->mxk_lock);
+ cfs_list_add(&rx->mxc_list, rxs);
+ spin_unlock(&conn->mxk_lock);
+ return 0;
}
kmx_ctx_t *
mxlnd_get_idle_tx(void)
{
- cfs_list_t *tmp = &kmxlnd_data.kmx_tx_idle;
- kmx_ctx_t *tx = NULL;
+ cfs_list_t *tmp = &kmxlnd_data.kmx_tx_idle;
+ kmx_ctx_t *tx = NULL;
- cfs_spin_lock(&kmxlnd_data.kmx_tx_idle_lock);
+ spin_lock(&kmxlnd_data.kmx_tx_idle_lock);
- if (cfs_list_empty (&kmxlnd_data.kmx_tx_idle)) {
- CNETERR("%d txs in use\n", kmxlnd_data.kmx_tx_used);
- cfs_spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
- return NULL;
- }
+ if (cfs_list_empty (&kmxlnd_data.kmx_tx_idle)) {
+ CNETERR("%d txs in use\n", kmxlnd_data.kmx_tx_used);
+ spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
+ return NULL;
+ }
tmp = &kmxlnd_data.kmx_tx_idle;
tx = cfs_list_entry (tmp->next, kmx_ctx_t, mxc_list);
kmxlnd_data.kmx_tx_next_cookie = 1;
}
kmxlnd_data.kmx_tx_used++;
- cfs_spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
+ spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
LASSERT (tx->mxc_get == tx->mxc_put);
tx->mxc_put++;
LASSERT(tx->mxc_get == tx->mxc_put);
- cfs_spin_lock(&kmxlnd_data.kmx_tx_idle_lock);
- cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_idle);
- kmxlnd_data.kmx_tx_used--;
- cfs_spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
+ spin_lock(&kmxlnd_data.kmx_tx_idle_lock);
+ cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_idle);
+ kmxlnd_data.kmx_tx_used--;
+ spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
- if (lntmsg[0] != NULL) lnet_finalize(kmxlnd_data.kmx_ni, lntmsg[0], result);
- if (lntmsg[1] != NULL) lnet_finalize(kmxlnd_data.kmx_ni, lntmsg[1], result);
- return 0;
+ if (lntmsg[0] != NULL)
+ lnet_finalize(kmxlnd_data.kmx_ni, lntmsg[0], result);
+ if (lntmsg[1] != NULL)
+ lnet_finalize(kmxlnd_data.kmx_ni, lntmsg[1], result);
+ return 0;
}
do {
found = 0;
- cfs_spin_lock(&conn->mxk_lock);
+ spin_lock(&conn->mxk_lock);
cfs_list_for_each_entry_safe(ctx, next, &conn->mxk_pending,
mxc_list) {
cfs_list_del_init(&ctx->mxc_list);
if (result == 1) {
ctx->mxc_errno = -ECONNABORTED;
ctx->mxc_state = MXLND_CTX_CANCELED;
- cfs_spin_unlock(&conn->mxk_lock);
- cfs_spin_lock(&kmxlnd_data.kmx_conn_lock);
+ spin_unlock(&conn->mxk_lock);
+ spin_lock(&kmxlnd_data.kmx_conn_lock);
/* we may be holding the global lock,
* move to orphan list so that it can free it */
cfs_list_add_tail(&ctx->mxc_list,
&kmxlnd_data.kmx_orphan_msgs);
count++;
- cfs_spin_unlock(&kmxlnd_data.kmx_conn_lock);
- cfs_spin_lock(&conn->mxk_lock);
- }
- break;
- }
- }
- cfs_spin_unlock(&conn->mxk_lock);
- }
- while (found);
+ spin_unlock(&kmxlnd_data.kmx_conn_lock);
+ spin_lock(&conn->mxk_lock);
+ }
+ break;
+ }
+ }
+ spin_unlock(&conn->mxk_lock);
+ } while (found);
- return count;
+ return count;
}
int
mxlnd_cancel_queued_txs(kmx_conn_t *conn)
{
- int count = 0;
- cfs_list_t *tmp = NULL;
+ int count = 0;
+ cfs_list_t *tmp = NULL;
- cfs_spin_lock(&conn->mxk_lock);
+ spin_lock(&conn->mxk_lock);
while (!cfs_list_empty(&conn->mxk_tx_free_queue) ||
!cfs_list_empty(&conn->mxk_tx_credit_queue)) {
tx = cfs_list_entry(tmp->next, kmx_ctx_t, mxc_list);
cfs_list_del_init(&tx->mxc_list);
- cfs_spin_unlock(&conn->mxk_lock);
- tx->mxc_errno = -ECONNABORTED;
- tx->mxc_state = MXLND_CTX_CANCELED;
- /* move to orphan list and then abort */
- cfs_spin_lock(&kmxlnd_data.kmx_conn_lock);
- cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_orphan_msgs);
- cfs_spin_unlock(&kmxlnd_data.kmx_conn_lock);
- count++;
- cfs_spin_lock(&conn->mxk_lock);
- }
- cfs_spin_unlock(&conn->mxk_lock);
+ spin_unlock(&conn->mxk_lock);
+ tx->mxc_errno = -ECONNABORTED;
+ tx->mxc_state = MXLND_CTX_CANCELED;
+ /* move to orphan list and then abort */
+ spin_lock(&kmxlnd_data.kmx_conn_lock);
+ cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_orphan_msgs);
+ spin_unlock(&kmxlnd_data.kmx_conn_lock);
+ count++;
+ spin_lock(&conn->mxk_lock);
+ }
+ spin_unlock(&conn->mxk_lock);
- return count;
+ return count;
}
void
void
mxlnd_conn_disconnect(kmx_conn_t *conn, int mx_dis, int send_bye)
{
- mx_endpoint_addr_t epa = conn->mxk_epa;
- int valid = !mxlnd_endpoint_addr_null(epa);
- int count = 0;
+ mx_endpoint_addr_t epa = conn->mxk_epa;
+ int valid = !mxlnd_endpoint_addr_null(epa);
+ int count = 0;
- cfs_spin_lock(&conn->mxk_lock);
- if (conn->mxk_status == MXLND_CONN_DISCONNECT) {
- cfs_spin_unlock(&conn->mxk_lock);
- return;
- }
- mxlnd_set_conn_status(conn, MXLND_CONN_DISCONNECT);
- conn->mxk_timeout = 0;
- cfs_spin_unlock(&conn->mxk_lock);
+ spin_lock(&conn->mxk_lock);
+ if (conn->mxk_status == MXLND_CONN_DISCONNECT) {
+ spin_unlock(&conn->mxk_lock);
+ return;
+ }
+ mxlnd_set_conn_status(conn, MXLND_CONN_DISCONNECT);
+ conn->mxk_timeout = 0;
+ spin_unlock(&conn->mxk_lock);
- count = mxlnd_cancel_queued_txs(conn);
- count += mxlnd_conn_cancel_pending_rxs(conn);
+ count = mxlnd_cancel_queued_txs(conn);
+ count += mxlnd_conn_cancel_pending_rxs(conn);
- if (count)
- cfs_up(&kmxlnd_data.kmx_conn_sem); /* let connd call kmxlnd_abort_msgs() */
+ if (count) /* let connd call kmxlnd_abort_msgs() */
+ up(&kmxlnd_data.kmx_conn_sem);
if (send_bye && valid &&
conn->mxk_peer->mxp_nid != kmxlnd_data.kmx_ni->ni_nid) {
mxlnd_set_conn_status(conn, MXLND_CONN_INIT);
/* mxk_epa - to be set after mx_iconnect() */
}
- cfs_spin_lock_init(&conn->mxk_lock);
+ spin_lock_init(&conn->mxk_lock);
/* conn->mxk_timeout = 0 */
/* conn->mxk_last_tx = 0 */
/* conn->mxk_last_rx = 0 */
mxlnd_conn_alloc(kmx_conn_t **connp, kmx_peer_t *peer)
{
int ret = 0;
- cfs_rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
+ rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
- cfs_write_lock(g_lock);
+ write_lock(g_lock);
ret = mxlnd_conn_alloc_locked(connp, peer);
- cfs_write_unlock(g_lock);
+ write_unlock(g_lock);
return ret;
}
int
mxlnd_q_pending_ctx(kmx_ctx_t *ctx)
{
- int ret = 0;
- kmx_conn_t *conn = ctx->mxc_conn;
+ int ret = 0;
+ kmx_conn_t *conn = ctx->mxc_conn;
- ctx->mxc_state = MXLND_CTX_PENDING;
- if (conn != NULL) {
- cfs_spin_lock(&conn->mxk_lock);
+ ctx->mxc_state = MXLND_CTX_PENDING;
+ if (conn != NULL) {
+ spin_lock(&conn->mxk_lock);
if (conn->mxk_status >= MXLND_CONN_INIT) {
cfs_list_add_tail(&ctx->mxc_list, &conn->mxk_pending);
if (conn->mxk_timeout == 0 || ctx->mxc_deadline < conn->mxk_timeout) {
ctx->mxc_state = MXLND_CTX_COMPLETED;
ret = -1;
}
- cfs_spin_unlock(&conn->mxk_lock);
- }
- return ret;
+ spin_unlock(&conn->mxk_lock);
+ }
+ return ret;
}
int
kmx_ctx_t *next = NULL;
LASSERT(conn != NULL);
- cfs_spin_lock(&conn->mxk_lock);
+ spin_lock(&conn->mxk_lock);
cfs_list_del_init(&ctx->mxc_list);
conn->mxk_timeout = 0;
if (!cfs_list_empty(&conn->mxk_pending)) {
kmx_ctx_t, mxc_list);
conn->mxk_timeout = next->mxc_deadline;
}
- cfs_spin_unlock(&conn->mxk_lock);
- }
- return 0;
+ spin_unlock(&conn->mxk_lock);
+ }
+ return 0;
}
/**
int hash = 0;
kmx_peer_t *peer = NULL;
kmx_peer_t *old = NULL;
- cfs_rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
+ rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
- cfs_read_lock(g_lock);
+ read_lock(g_lock);
peer = mxlnd_find_peer_by_nid_locked(nid); /* adds peer ref */
if ((peer && peer->mxp_conn) || /* found peer with conn or */
(!peer && !create)) { /* did not find peer and do not create one */
- cfs_read_unlock(g_lock);
+ read_unlock(g_lock);
return peer;
}
- cfs_read_unlock(g_lock);
+ read_unlock(g_lock);
/* if peer but _not_ conn */
if (peer && !peer->mxp_conn) {
if (create) {
- cfs_write_lock(g_lock);
+ write_lock(g_lock);
if (!peer->mxp_conn) { /* check again */
/* create the conn */
ret = mxlnd_conn_alloc_locked(&peer->mxp_conn, peer);
mxlnd_conn_decref(peer->mxp_conn);
}
}
- cfs_write_unlock(g_lock);
+ write_unlock(g_lock);
}
return peer;
}
if (ret != 0) /* no memory, peer is NULL */
return NULL;
- cfs_write_lock(g_lock);
+ write_lock(g_lock);
/* look again */
old = mxlnd_find_peer_by_nid_locked(nid);
mxlnd_conn_decref(peer->mxp_conn); /* drop ref from peer_alloc */
}
- cfs_write_unlock(g_lock);
+ write_unlock(g_lock);
return peer;
}
mx_decompose_endpoint_addr2(source, &nic_id, &ep_id, &sid);
mxlnd_parse_match(match_value, &msg_type, &error, &cookie);
- cfs_read_lock(&kmxlnd_data.kmx_global_lock);
+ read_lock(&kmxlnd_data.kmx_global_lock);
mx_get_endpoint_addr_context(source, (void **) &conn);
if (conn) {
mxlnd_conn_addref(conn); /* add ref for this function */
peer = conn->mxk_peer;
}
- cfs_read_unlock(&kmxlnd_data.kmx_global_lock);
+ read_unlock(&kmxlnd_data.kmx_global_lock);
if (msg_type == MXLND_MSG_BYE) {
if (conn) {
mxlnd_send_message(source, MXLND_MSG_CONN_ACK, ENOMEM, 0);
return MX_RECV_FINISHED;
}
- cfs_spin_lock(&kmxlnd_data.kmx_conn_lock);
- cfs_list_add_tail(&cp->mxr_list, &kmxlnd_data.kmx_conn_reqs);
- cfs_spin_unlock(&kmxlnd_data.kmx_conn_lock);
- cfs_up(&kmxlnd_data.kmx_conn_sem);
- return MX_RECV_FINISHED;
- }
+ spin_lock(&kmxlnd_data.kmx_conn_lock);
+ cfs_list_add_tail(&cp->mxr_list, &kmxlnd_data.kmx_conn_reqs);
+ spin_unlock(&kmxlnd_data.kmx_conn_lock);
+ up(&kmxlnd_data.kmx_conn_sem);
+ return MX_RECV_FINISHED;
+ }
if (msg_type == MXLND_MSG_CONN_ACK) {
kmx_connparams_t *cp = NULL;
const int expected = offsetof(kmx_msg_t, mxm_u) +
CNETERR("unable to alloc kmx_connparams_t"
" from %llx:%d\n", nic_id, ep_id);
mxlnd_conn_disconnect(conn, 1, 1);
- } else {
- cfs_spin_lock(&kmxlnd_data.kmx_conn_lock);
- cfs_list_add_tail(&cp->mxr_list,
- &kmxlnd_data.kmx_conn_reqs);
- cfs_spin_unlock(&kmxlnd_data.kmx_conn_lock);
- cfs_up(&kmxlnd_data.kmx_conn_sem);
- }
+ } else {
+ spin_lock(&kmxlnd_data.kmx_conn_lock);
+ cfs_list_add_tail(&cp->mxr_list,
+ &kmxlnd_data.kmx_conn_reqs);
+ spin_unlock(&kmxlnd_data.kmx_conn_lock);
+ up(&kmxlnd_data.kmx_conn_sem);
+ }
}
mxlnd_conn_decref(conn); /* drop ref taken above */
int ret = -ENOENT;
kmx_peer_t *peer = NULL;
- cfs_read_lock(&kmxlnd_data.kmx_global_lock);
+ read_lock(&kmxlnd_data.kmx_global_lock);
for (i = 0; i < MXLND_HASH_SIZE; i++) {
cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i],
mxp_list) {
}
}
}
- cfs_read_unlock(&kmxlnd_data.kmx_global_lock);
+ read_unlock(&kmxlnd_data.kmx_global_lock);
return ret;
}
if (nid != LNET_NID_ANY) {
peer = mxlnd_find_peer_by_nid(nid, 0); /* adds peer ref */
}
- cfs_write_lock(&kmxlnd_data.kmx_global_lock);
+ write_lock(&kmxlnd_data.kmx_global_lock);
if (nid != LNET_NID_ANY) {
if (peer == NULL) {
ret = -ENOENT;
}
}
}
- cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
+ write_unlock(&kmxlnd_data.kmx_global_lock);
return ret;
}
kmx_peer_t *peer = NULL;
kmx_conn_t *conn = NULL;
- cfs_read_lock(&kmxlnd_data.kmx_global_lock);
+ read_lock(&kmxlnd_data.kmx_global_lock);
for (i = 0; i < MXLND_HASH_SIZE; i++) {
cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i],
mxp_list) {
}
mxlnd_conn_addref(conn); /* add ref here, dec in ctl() */
- cfs_read_unlock(&kmxlnd_data.kmx_global_lock);
+ read_unlock(&kmxlnd_data.kmx_global_lock);
return conn;
}
}
}
- cfs_read_unlock(&kmxlnd_data.kmx_global_lock);
+ read_unlock(&kmxlnd_data.kmx_global_lock);
return NULL;
}
int ret = 0;
kmx_peer_t *peer = NULL;
- cfs_write_lock(&kmxlnd_data.kmx_global_lock);
+ write_lock(&kmxlnd_data.kmx_global_lock);
if (nid != LNET_NID_ANY) {
peer = mxlnd_find_peer_by_nid_locked(nid); /* adds peer ref */
if (peer == NULL) {
mxlnd_close_matching_conns_locked(peer);
}
}
- cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
+ write_unlock(&kmxlnd_data.kmx_global_lock);
return ret;
}
static inline void
mxlnd_peer_queue_tx(kmx_ctx_t *tx)
{
- LASSERT(tx->mxc_peer != NULL);
- LASSERT(tx->mxc_conn != NULL);
- cfs_spin_lock(&tx->mxc_conn->mxk_lock);
- mxlnd_peer_queue_tx_locked(tx);
- cfs_spin_unlock(&tx->mxc_conn->mxk_lock);
+ LASSERT(tx->mxc_peer != NULL);
+ LASSERT(tx->mxc_conn != NULL);
+ spin_lock(&tx->mxc_conn->mxk_lock);
+ mxlnd_peer_queue_tx_locked(tx);
+ spin_unlock(&tx->mxc_conn->mxk_lock);
- return;
+ return;
}
/**
mxlnd_peer_queue_tx(tx);
mxlnd_check_sends(peer);
} else {
- cfs_spin_lock(&kmxlnd_data.kmx_tx_queue_lock);
- cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_queue);
- cfs_spin_unlock(&kmxlnd_data.kmx_tx_queue_lock);
- cfs_up(&kmxlnd_data.kmx_tx_queue_sem);
- }
+ spin_lock(&kmxlnd_data.kmx_tx_queue_lock);
+ cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_queue);
+ spin_unlock(&kmxlnd_data.kmx_tx_queue_lock);
+ up(&kmxlnd_data.kmx_tx_queue_sem);
+ }
done:
- return;
+ return;
}
int
int nob = 0;
uint32_t length = 0;
kmx_peer_t *peer = NULL;
- cfs_rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
+ rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
payload_nob, payload_niov, libcfs_id2str(target));
if (unlikely(peer->mxp_incompatible)) {
mxlnd_peer_decref(peer); /* drop ref taken above */
} else {
- cfs_read_lock(g_lock);
- conn = peer->mxp_conn;
- if (conn && conn->mxk_status != MXLND_CONN_DISCONNECT) {
- mxlnd_conn_addref(conn);
- } else {
- conn = NULL;
- }
- cfs_read_unlock(g_lock);
+ read_lock(g_lock);
+ conn = peer->mxp_conn;
+ if (conn && conn->mxk_status != MXLND_CONN_DISCONNECT)
+ mxlnd_conn_addref(conn);
+ else
+ conn = NULL;
+ read_unlock(g_lock);
mxlnd_peer_decref(peer); /* drop peer ref taken above */
if (!conn)
return -ENOTCONN;
if (repost) {
/* we received a message, increment peer's outstanding credits */
- if (credit == 1) {
- cfs_spin_lock(&conn->mxk_lock);
- conn->mxk_outstanding++;
- cfs_spin_unlock(&conn->mxk_lock);
- }
+ if (credit == 1) {
+ spin_lock(&conn->mxk_lock);
+ conn->mxk_outstanding++;
+ spin_unlock(&conn->mxk_lock);
+ }
/* we are done with the rx */
mxlnd_put_idle_rx(rx);
mxlnd_conn_decref(conn);
kmx_ctx_t *tx = NULL;
kmx_peer_t *peer = NULL;
cfs_list_t *queue = &kmxlnd_data.kmx_tx_queue;
- cfs_spinlock_t *tx_q_lock = &kmxlnd_data.kmx_tx_queue_lock;
- cfs_rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
-
- cfs_daemonize("mxlnd_tx_queued");
-
- while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
- ret = cfs_down_interruptible(&kmxlnd_data.kmx_tx_queue_sem);
- if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown))
- break;
- if (ret != 0) // Should we check for -EINTR?
- continue;
- cfs_spin_lock(tx_q_lock);
- if (cfs_list_empty (&kmxlnd_data.kmx_tx_queue)) {
- cfs_spin_unlock(tx_q_lock);
- continue;
- }
- tx = cfs_list_entry (queue->next, kmx_ctx_t, mxc_list);
- cfs_list_del_init(&tx->mxc_list);
- cfs_spin_unlock(tx_q_lock);
-
- found = 0;
- peer = mxlnd_find_peer_by_nid(tx->mxc_nid, 0); /* adds peer ref */
- if (peer != NULL) {
- tx->mxc_peer = peer;
- cfs_write_lock(g_lock);
- if (peer->mxp_conn == NULL) {
- ret = mxlnd_conn_alloc_locked(&peer->mxp_conn, peer);
- if (ret != 0) {
- /* out of memory, give up and fail tx */
- tx->mxc_errno = -ENOMEM;
- mxlnd_peer_decref(peer);
- cfs_write_unlock(g_lock);
- mxlnd_put_idle_tx(tx);
- continue;
- }
- }
- tx->mxc_conn = peer->mxp_conn;
- mxlnd_conn_addref(tx->mxc_conn); /* for this tx */
- mxlnd_peer_decref(peer); /* drop peer ref taken above */
- cfs_write_unlock(g_lock);
+ spinlock_t *tx_q_lock = &kmxlnd_data.kmx_tx_queue_lock;
+ rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
+
+ cfs_daemonize("mxlnd_tx_queued");
+
+ while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
+ ret = down_interruptible(&kmxlnd_data.kmx_tx_queue_sem);
+ if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown))
+ break;
+ if (ret != 0) /* Should we check for -EINTR? */
+ continue;
+ spin_lock(tx_q_lock);
+ if (cfs_list_empty(&kmxlnd_data.kmx_tx_queue)) {
+ spin_unlock(tx_q_lock);
+ continue;
+ }
+ tx = cfs_list_entry(queue->next, kmx_ctx_t, mxc_list);
+ cfs_list_del_init(&tx->mxc_list);
+ spin_unlock(tx_q_lock);
+
+ found = 0;
+ peer = mxlnd_find_peer_by_nid(tx->mxc_nid, 0); /* adds ref */
+ if (peer != NULL) {
+ tx->mxc_peer = peer;
+ write_lock(g_lock);
+ if (peer->mxp_conn == NULL) {
+ ret = mxlnd_conn_alloc_locked(&peer->mxp_conn,
+ peer);
+ if (ret != 0) {
+ /* out of memory: give up, fail tx */
+ tx->mxc_errno = -ENOMEM;
+ mxlnd_peer_decref(peer);
+ write_unlock(g_lock);
+ mxlnd_put_idle_tx(tx);
+ continue;
+ }
+ }
+ tx->mxc_conn = peer->mxp_conn;
+ mxlnd_conn_addref(tx->mxc_conn); /* for this tx */
+ mxlnd_peer_decref(peer); /* drop peer ref taken above */
+ write_unlock(g_lock);
mxlnd_queue_tx(tx);
found = 1;
}
/* add peer to global peer list, but look to see
* if someone already created it after we released
* the read lock */
- cfs_write_lock(g_lock);
+ write_lock(g_lock);
old = mxlnd_find_peer_by_nid_locked(peer->mxp_nid);
if (old) {
/* we have a peer ref on old */
mxlnd_conn_decref(peer->mxp_conn); /* drop peer's ref */
mxlnd_peer_decref(peer);
}
- cfs_write_unlock(g_lock);
+ write_unlock(g_lock);
mxlnd_queue_tx(tx);
}
mx_nic_id_to_board_number(peer->mxp_nic_id, &peer->mxp_board);
}
if (peer->mxp_nic_id == 0ULL && conn->mxk_status == MXLND_CONN_WAIT) {
- /* not mapped yet, return */
- cfs_spin_lock(&conn->mxk_lock);
- mxlnd_set_conn_status(conn, MXLND_CONN_INIT);
- cfs_spin_unlock(&conn->mxk_lock);
+ /* not mapped yet, return */
+ spin_lock(&conn->mxk_lock);
+ mxlnd_set_conn_status(conn, MXLND_CONN_INIT);
+ spin_unlock(&conn->mxk_lock);
}
}
peer->mxp_ep_id, MXLND_MSG_MAGIC, match,
(void *) peer, &request);
if (unlikely(mxret != MX_SUCCESS)) {
- cfs_spin_lock(&conn->mxk_lock);
- mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
- cfs_spin_unlock(&conn->mxk_lock);
+ spin_lock(&conn->mxk_lock);
+ mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
+ spin_unlock(&conn->mxk_lock);
CNETERR("mx_iconnect() failed with %s (%d) to %s\n",
mx_strerror(mxret), mxret, libcfs_nid2str(peer->mxp_nid));
mxlnd_conn_decref(conn);
LASSERT(peer != NULL);
return -1;
}
- cfs_write_lock(&kmxlnd_data.kmx_global_lock);
- conn = peer->mxp_conn;
- /* NOTE take a ref for the duration of this function since it is called
- * when there might not be any queued txs for this peer */
- if (conn) {
- if (conn->mxk_status == MXLND_CONN_DISCONNECT) {
- cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
- return -1;
- }
- mxlnd_conn_addref(conn); /* for duration of this function */
- }
- cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
+ write_lock(&kmxlnd_data.kmx_global_lock);
+ conn = peer->mxp_conn;
+ /* NOTE take a ref for the duration of this function since it is
+ * called when there might not be any queued txs for this peer */
+ if (conn) {
+ if (conn->mxk_status == MXLND_CONN_DISCONNECT) {
+ write_unlock(&kmxlnd_data.kmx_global_lock);
+ return -1;
+ }
+ mxlnd_conn_addref(conn); /* for duration of this function */
+ }
+ write_unlock(&kmxlnd_data.kmx_global_lock);
/* do not add another ref for this tx */
}
#endif
- cfs_spin_lock(&conn->mxk_lock);
+ spin_lock(&conn->mxk_lock);
ntx_posted = conn->mxk_ntx_posted;
credits = conn->mxk_credits;
conn->mxk_status == MXLND_CONN_FAIL)) {
CDEBUG(D_NET, "status=%s\n", mxlnd_connstatus_to_str(conn->mxk_status));
mxlnd_set_conn_status(conn, MXLND_CONN_WAIT);
- cfs_spin_unlock(&conn->mxk_lock);
+ spin_unlock(&conn->mxk_lock);
mxlnd_iconnect(peer, (u8) MXLND_MSG_ICON_REQ);
goto done;
}
cfs_time_aftereq(jiffies, tx->mxc_deadline)) {
cfs_list_del_init(&tx->mxc_list);
tx->mxc_errno = -ECONNABORTED;
- cfs_spin_unlock(&conn->mxk_lock);
+ spin_unlock(&conn->mxk_lock);
mxlnd_put_idle_tx(tx);
mxlnd_conn_decref(conn);
goto done;
(conn->mxk_ntx_msgs >= 1)) {
conn->mxk_credits++;
conn->mxk_ntx_posted--;
- cfs_spin_unlock(&conn->mxk_lock);
+ spin_unlock(&conn->mxk_lock);
/* redundant NOOP */
mxlnd_put_idle_tx(tx);
mxlnd_conn_decref(conn);
mxret = MX_SUCCESS;
status = conn->mxk_status;
- cfs_spin_unlock(&conn->mxk_lock);
+ spin_unlock(&conn->mxk_lock);
if (likely((status == MXLND_CONN_READY) ||
(msg_type == MXLND_MSG_CONN_REQ) ||
&tx->mxc_mxreq);
} else {
/* send a DATA tx */
- cfs_spin_lock(&conn->mxk_lock);
- conn->mxk_ntx_data--;
- conn->mxk_data_posted++;
- cfs_spin_unlock(&conn->mxk_lock);
+ spin_lock(&conn->mxk_lock);
+ conn->mxk_ntx_data--;
+ conn->mxk_data_posted++;
+ spin_unlock(&conn->mxk_lock);
CDEBUG(D_NET, "sending %s 0x%llx\n",
mxlnd_msgtype_to_str(msg_type),
tx->mxc_cookie);
tx->mxc_errno = -ECONNABORTED;
}
if (credit) {
- cfs_spin_lock(&conn->mxk_lock);
- conn->mxk_ntx_posted--;
- conn->mxk_credits++;
- cfs_spin_unlock(&conn->mxk_lock);
- } else if (msg_type == MXLND_MSG_PUT_DATA ||
- msg_type == MXLND_MSG_GET_DATA) {
- cfs_spin_lock(&conn->mxk_lock);
- conn->mxk_data_posted--;
- cfs_spin_unlock(&conn->mxk_lock);
- }
- if (msg_type != MXLND_MSG_PUT_DATA &&
- msg_type != MXLND_MSG_GET_DATA &&
- msg_type != MXLND_MSG_CONN_REQ &&
- msg_type != MXLND_MSG_CONN_ACK) {
- cfs_spin_lock(&conn->mxk_lock);
- conn->mxk_outstanding += tx->mxc_msg->mxm_credits;
- cfs_spin_unlock(&conn->mxk_lock);
+ spin_lock(&conn->mxk_lock);
+ conn->mxk_ntx_posted--;
+ conn->mxk_credits++;
+ spin_unlock(&conn->mxk_lock);
+ } else if (msg_type == MXLND_MSG_PUT_DATA ||
+ msg_type == MXLND_MSG_GET_DATA) {
+ spin_lock(&conn->mxk_lock);
+ conn->mxk_data_posted--;
+ spin_unlock(&conn->mxk_lock);
+ }
+ if (msg_type != MXLND_MSG_PUT_DATA &&
+ msg_type != MXLND_MSG_GET_DATA &&
+ msg_type != MXLND_MSG_CONN_REQ &&
+ msg_type != MXLND_MSG_CONN_ACK) {
+ spin_lock(&conn->mxk_lock);
+ conn->mxk_outstanding +=
+ tx->mxc_msg->mxm_credits;
+ spin_unlock(&conn->mxk_lock);
}
if (msg_type != MXLND_MSG_CONN_REQ &&
msg_type != MXLND_MSG_CONN_ACK) {
mxlnd_conn_decref(conn);
}
}
- cfs_spin_lock(&conn->mxk_lock);
- }
+ spin_lock(&conn->mxk_lock);
+ }
done_locked:
- cfs_spin_unlock(&conn->mxk_lock);
+ spin_unlock(&conn->mxk_lock);
done:
- mxlnd_conn_decref(conn); /* drop ref taken at start of function */
- return found;
+ mxlnd_conn_decref(conn); /* drop ref taken at start of function */
+ return found;
}
if (failed) {
if (tx->mxc_errno == 0) tx->mxc_errno = -EIO;
} else {
- cfs_spin_lock(&conn->mxk_lock);
- conn->mxk_last_tx = cfs_time_current(); /* jiffies */
- cfs_spin_unlock(&conn->mxk_lock);
- }
-
- switch (type) {
-
- case MXLND_MSG_GET_DATA:
- cfs_spin_lock(&conn->mxk_lock);
- if (conn->mxk_incarnation == tx->mxc_incarnation) {
- conn->mxk_outstanding++;
- conn->mxk_data_posted--;
- }
- cfs_spin_unlock(&conn->mxk_lock);
- break;
-
- case MXLND_MSG_PUT_DATA:
- cfs_spin_lock(&conn->mxk_lock);
- if (conn->mxk_incarnation == tx->mxc_incarnation) {
- conn->mxk_data_posted--;
- }
- cfs_spin_unlock(&conn->mxk_lock);
- break;
+ spin_lock(&conn->mxk_lock);
+ conn->mxk_last_tx = cfs_time_current(); /* jiffies */
+ spin_unlock(&conn->mxk_lock);
+ }
+
+ switch (type) {
+
+ case MXLND_MSG_GET_DATA:
+ spin_lock(&conn->mxk_lock);
+ if (conn->mxk_incarnation == tx->mxc_incarnation) {
+ conn->mxk_outstanding++;
+ conn->mxk_data_posted--;
+ }
+ spin_unlock(&conn->mxk_lock);
+ break;
+
+ case MXLND_MSG_PUT_DATA:
+ spin_lock(&conn->mxk_lock);
+ if (conn->mxk_incarnation == tx->mxc_incarnation) {
+ conn->mxk_data_posted--;
+ }
+ spin_unlock(&conn->mxk_lock);
+ break;
case MXLND_MSG_NOOP:
case MXLND_MSG_PUT_REQ:
mx_strstatus(code), code, tx->mxc_errno,
libcfs_nid2str(tx->mxc_nid));
if (!peer->mxp_incompatible) {
- cfs_spin_lock(&conn->mxk_lock);
- if (code == MX_STATUS_BAD_SESSION)
- mxlnd_set_conn_status(conn, MXLND_CONN_INIT);
- else
- mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
- cfs_spin_unlock(&conn->mxk_lock);
+ spin_lock(&conn->mxk_lock);
+ if (code == MX_STATUS_BAD_SESSION)
+ mxlnd_set_conn_status(conn,
+ MXLND_CONN_INIT);
+ else
+ mxlnd_set_conn_status(conn,
+ MXLND_CONN_FAIL);
+ spin_unlock(&conn->mxk_lock);
}
}
break;
}
if (credit) {
- cfs_spin_lock(&conn->mxk_lock);
- if (conn->mxk_incarnation == tx->mxc_incarnation) {
- conn->mxk_ntx_posted--;
- }
- cfs_spin_unlock(&conn->mxk_lock);
+ spin_lock(&conn->mxk_lock);
+ if (conn->mxk_incarnation == tx->mxc_incarnation) {
+ conn->mxk_ntx_posted--;
+ }
+ spin_unlock(&conn->mxk_lock);
}
mxlnd_put_idle_tx(tx);
} /* else peer and conn == NULL */
if (conn == NULL && peer != NULL) {
- cfs_write_lock(&kmxlnd_data.kmx_global_lock);
+ write_lock(&kmxlnd_data.kmx_global_lock);
conn = peer->mxp_conn;
if (conn) {
mxlnd_conn_addref(conn); /* conn takes ref... */
conn_ref = 1;
peer_ref = 0;
}
- cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
+ write_unlock(&kmxlnd_data.kmx_global_lock);
rx->mxc_conn = conn;
}
LASSERT(peer != NULL && conn != NULL);
if (msg->mxm_credits != 0) {
- cfs_spin_lock(&conn->mxk_lock);
+ spin_lock(&conn->mxk_lock);
if (msg->mxm_srcstamp == conn->mxk_incarnation) {
if ((conn->mxk_credits + msg->mxm_credits) >
*kmxlnd_tunables.kmx_peercredits) {
LASSERT(conn->mxk_credits >= 0);
LASSERT(conn->mxk_credits <= *kmxlnd_tunables.kmx_peercredits);
}
- cfs_spin_unlock(&conn->mxk_lock);
+ spin_unlock(&conn->mxk_lock);
}
CDEBUG(D_NET, "switch %s for rx (0x%llx)\n", mxlnd_msgtype_to_str(type), seq);
if (ret < 0) {
CDEBUG(D_NET, "setting PEER_CONN_FAILED\n");
- cfs_spin_lock(&conn->mxk_lock);
- mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
- cfs_spin_unlock(&conn->mxk_lock);
- }
+ spin_lock(&conn->mxk_lock);
+ mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
+ spin_unlock(&conn->mxk_lock);
+ }
cleanup:
- if (conn != NULL) {
- cfs_spin_lock(&conn->mxk_lock);
- conn->mxk_last_rx = cfs_time_current(); /* jiffies */
- cfs_spin_unlock(&conn->mxk_lock);
+ if (conn != NULL) {
+ spin_lock(&conn->mxk_lock);
+ conn->mxk_last_rx = cfs_time_current(); /* jiffies */
+ spin_unlock(&conn->mxk_lock);
}
if (repost) {
type == MXLND_MSG_EAGER ||
type == MXLND_MSG_PUT_REQ ||
type == MXLND_MSG_NOOP) {
- cfs_spin_lock(&conn->mxk_lock);
- conn->mxk_outstanding++;
- cfs_spin_unlock(&conn->mxk_lock);
+ spin_lock(&conn->mxk_lock);
+ conn->mxk_outstanding++;
+ spin_unlock(&conn->mxk_lock);
}
}
if (conn_ref) mxlnd_conn_decref(conn);
peer->mxp_nid,
peer->mxp_nic_id,
peer->mxp_ep_id);
- cfs_spin_lock(&conn->mxk_lock);
- mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
- cfs_spin_unlock(&conn->mxk_lock);
+ spin_lock(&conn->mxk_lock);
+ mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
+ spin_unlock(&conn->mxk_lock);
if (cfs_time_after(jiffies, peer->mxp_reconnect_time +
MXLND_CONNECT_TIMEOUT)) {
return;
}
mx_decompose_endpoint_addr2(status.source, &nic_id, &ep_id, &sid);
- cfs_write_lock(&kmxlnd_data.kmx_global_lock);
- cfs_spin_lock(&conn->mxk_lock);
- conn->mxk_epa = status.source;
- mx_set_endpoint_addr_context(conn->mxk_epa, (void *) conn);
- if (msg_type == MXLND_MSG_ICON_ACK && likely(!peer->mxp_incompatible)) {
- mxlnd_set_conn_status(conn, MXLND_CONN_READY);
- }
- cfs_spin_unlock(&conn->mxk_lock);
- cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
-
- /* mx_iconnect() succeeded, reset delay to 0 */
- cfs_write_lock(&kmxlnd_data.kmx_global_lock);
- peer->mxp_reconnect_time = 0;
- peer->mxp_conn->mxk_sid = sid;
- cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
+ write_lock(&kmxlnd_data.kmx_global_lock);
+ spin_lock(&conn->mxk_lock);
+ conn->mxk_epa = status.source;
+ mx_set_endpoint_addr_context(conn->mxk_epa, (void *) conn);
+ if (msg_type == MXLND_MSG_ICON_ACK && likely(!peer->mxp_incompatible)) {
+ mxlnd_set_conn_status(conn, MXLND_CONN_READY);
+ }
+ spin_unlock(&conn->mxk_lock);
+ write_unlock(&kmxlnd_data.kmx_global_lock);
+
+ /* mx_iconnect() succeeded, reset delay to 0 */
+ write_lock(&kmxlnd_data.kmx_global_lock);
+ peer->mxp_reconnect_time = 0;
+ peer->mxp_conn->mxk_sid = sid;
+ write_unlock(&kmxlnd_data.kmx_global_lock);
/* marshal CONN_REQ or CONN_ACK msg */
/* we are still using the conn ref from iconnect() - do not take another */
CNETERR("Can't obtain %s tx for %s\n",
mxlnd_msgtype_to_str(type),
libcfs_nid2str(peer->mxp_nid));
- cfs_spin_lock(&conn->mxk_lock);
- mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
- cfs_spin_unlock(&conn->mxk_lock);
+ spin_lock(&conn->mxk_lock);
+ mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
+ spin_unlock(&conn->mxk_lock);
mxlnd_conn_decref(conn);
return;
}
unsigned long next = 0; /* jiffies */
kmx_peer_t *peer = NULL;
kmx_conn_t *conn = NULL;
- cfs_rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
+ rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
- cfs_read_lock(g_lock);
- for (i = 0; i < MXLND_HASH_SIZE; i++) {
- cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i],
- mxp_list) {
+ read_lock(g_lock);
+ for (i = 0; i < MXLND_HASH_SIZE; i++) {
+ cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i],
+ mxp_list) {
- if (unlikely(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
- cfs_read_unlock(g_lock);
+ if (unlikely(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
+ read_unlock(g_lock);
return next;
}
continue;
}
- cfs_spin_lock(&conn->mxk_lock);
+ spin_lock(&conn->mxk_lock);
- /* if nothing pending (timeout == 0) or
- * if conn is already disconnected,
- * skip this conn */
- if (conn->mxk_timeout == 0 ||
- conn->mxk_status == MXLND_CONN_DISCONNECT) {
- cfs_spin_unlock(&conn->mxk_lock);
+ /* if nothing pending (timeout == 0) or
+ * if conn is already disconnected,
+ * skip this conn */
+ if (conn->mxk_timeout == 0 ||
+ conn->mxk_status == MXLND_CONN_DISCONNECT) {
+ spin_unlock(&conn->mxk_lock);
mxlnd_conn_decref(conn);
continue;
}
disconnect = 0;
- if (cfs_time_aftereq(now, conn->mxk_timeout)) {
- disconnect = 1;
- }
- cfs_spin_unlock(&conn->mxk_lock);
+ if (cfs_time_aftereq(now, conn->mxk_timeout))
+ disconnect = 1;
+ spin_unlock(&conn->mxk_lock);
- if (disconnect) {
- mxlnd_conn_disconnect(conn, 1, 1);
- }
- mxlnd_conn_decref(conn);
- }
- }
- cfs_read_unlock(g_lock);
- if (next == 0) next = now + MXLND_COMM_TIMEOUT;
+ if (disconnect)
+ mxlnd_conn_disconnect(conn, 1, 1);
+ mxlnd_conn_decref(conn);
+ }
+ }
+ read_unlock(g_lock);
+ if (next == 0)
+ next = now + MXLND_COMM_TIMEOUT;
- return next;
+ return next;
}
void
kmx_msg_t *msg = &cp->mxr_msg;
kmx_peer_t *peer = cp->mxr_peer;
kmx_conn_t *conn = NULL;
- cfs_rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
+ rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
mx_decompose_endpoint_addr2(cp->mxr_epa, &nic_id, &ep_id, &sid);
}
peer->mxp_conn->mxk_sid = sid;
LASSERT(peer->mxp_ep_id == ep_id);
- cfs_write_lock(g_lock);
+ write_lock(g_lock);
existing_peer = mxlnd_find_peer_by_nid_locked(msg->mxm_srcnid);
if (existing_peer) {
mxlnd_conn_decref(peer->mxp_conn);
&kmxlnd_data.kmx_peers[hash]);
cfs_atomic_inc(&kmxlnd_data.kmx_npeers);
}
- cfs_write_unlock(g_lock);
+ write_unlock(g_lock);
} else {
ret = mxlnd_conn_alloc(&conn, peer); /* adds 2nd ref */
- cfs_write_lock(g_lock);
+ write_lock(g_lock);
mxlnd_peer_decref(peer); /* drop ref taken above */
- cfs_write_unlock(g_lock);
+ write_unlock(g_lock);
if (ret != 0) {
CNETERR("Cannot allocate mxp_conn\n");
goto cleanup;
conn = peer->mxp_conn;
}
}
- cfs_write_lock(g_lock);
- peer->mxp_incompatible = incompatible;
- cfs_write_unlock(g_lock);
- cfs_spin_lock(&conn->mxk_lock);
- conn->mxk_incarnation = msg->mxm_srcstamp;
- mxlnd_set_conn_status(conn, MXLND_CONN_WAIT);
- cfs_spin_unlock(&conn->mxk_lock);
+ write_lock(g_lock);
+ peer->mxp_incompatible = incompatible;
+ write_unlock(g_lock);
+ spin_lock(&conn->mxk_lock);
+ conn->mxk_incarnation = msg->mxm_srcstamp;
+ mxlnd_set_conn_status(conn, MXLND_CONN_WAIT);
+ spin_unlock(&conn->mxk_lock);
/* handle_conn_ack() will create the CONN_ACK msg */
mxlnd_iconnect(peer, (u8) MXLND_MSG_ICON_ACK);
ret = -1;
goto failed;
}
- cfs_write_lock(&kmxlnd_data.kmx_global_lock);
- peer->mxp_incompatible = incompatible;
- cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
- cfs_spin_lock(&conn->mxk_lock);
+ write_lock(&kmxlnd_data.kmx_global_lock);
+ peer->mxp_incompatible = incompatible;
+ write_unlock(&kmxlnd_data.kmx_global_lock);
+ spin_lock(&conn->mxk_lock);
conn->mxk_credits = *kmxlnd_tunables.kmx_peercredits;
conn->mxk_outstanding = 0;
conn->mxk_incarnation = msg->mxm_srcstamp;
libcfs_nid2str(msg->mxm_srcnid));
mxlnd_set_conn_status(conn, MXLND_CONN_READY);
}
- cfs_spin_unlock(&conn->mxk_lock);
+ spin_unlock(&conn->mxk_lock);
- if (!incompatible)
- mxlnd_check_sends(peer);
+ if (!incompatible)
+ mxlnd_check_sends(peer);
failed:
- if (ret < 0) {
- cfs_spin_lock(&conn->mxk_lock);
- mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
- cfs_spin_unlock(&conn->mxk_lock);
- }
+ if (ret < 0) {
+ spin_lock(&conn->mxk_lock);
+ mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
+ spin_unlock(&conn->mxk_lock);
+ }
- if (incompatible) mxlnd_conn_disconnect(conn, 0, 0);
+ if (incompatible)
+ mxlnd_conn_disconnect(conn, 0, 0);
- mxlnd_connparams_free(cp);
- return;
+ mxlnd_connparams_free(cp);
+ return;
}
int
mxlnd_abort_msgs(void)
{
- int count = 0;
- cfs_list_t *orphans = &kmxlnd_data.kmx_orphan_msgs;
- cfs_spinlock_t *g_conn_lock = &kmxlnd_data.kmx_conn_lock;
+ int count = 0;
+ cfs_list_t *orphans = &kmxlnd_data.kmx_orphan_msgs;
+ spinlock_t *g_conn_lock = &kmxlnd_data.kmx_conn_lock;
- /* abort orphans */
- cfs_spin_lock(g_conn_lock);
- while (!cfs_list_empty(orphans)) {
- kmx_ctx_t *ctx = NULL;
- kmx_conn_t *conn = NULL;
+ /* abort orphans */
+ spin_lock(g_conn_lock);
+ while (!cfs_list_empty(orphans)) {
+ kmx_ctx_t *ctx = NULL;
+ kmx_conn_t *conn = NULL;
- ctx = cfs_list_entry(orphans->next, kmx_ctx_t, mxc_list);
- cfs_list_del_init(&ctx->mxc_list);
- cfs_spin_unlock(g_conn_lock);
+ ctx = cfs_list_entry(orphans->next, kmx_ctx_t, mxc_list);
+ cfs_list_del_init(&ctx->mxc_list);
+ spin_unlock(g_conn_lock);
ctx->mxc_errno = -ECONNABORTED;
conn = ctx->mxc_conn;
}
count++;
- cfs_spin_lock(g_conn_lock);
- }
- cfs_spin_unlock(g_conn_lock);
+ spin_lock(g_conn_lock);
+ }
+ spin_unlock(g_conn_lock);
- return count;
+ return count;
}
int
mxlnd_free_conn_zombies(void)
{
- int count = 0;
- cfs_list_t *zombies = &kmxlnd_data.kmx_conn_zombies;
- cfs_spinlock_t *g_conn_lock = &kmxlnd_data.kmx_conn_lock;
- cfs_rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
+ int count = 0;
+ cfs_list_t *zombies = &kmxlnd_data.kmx_conn_zombies;
+ spinlock_t *g_conn_lock = &kmxlnd_data.kmx_conn_lock;
+ rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
- /* cleanup any zombies */
- cfs_spin_lock(g_conn_lock);
- while (!cfs_list_empty(zombies)) {
- kmx_conn_t *conn = NULL;
+ /* cleanup any zombies */
+ spin_lock(g_conn_lock);
+ while (!cfs_list_empty(zombies)) {
+ kmx_conn_t *conn = NULL;
- conn = cfs_list_entry(zombies->next, kmx_conn_t, mxk_zombie);
- cfs_list_del_init(&conn->mxk_zombie);
- cfs_spin_unlock(g_conn_lock);
+ conn = cfs_list_entry(zombies->next, kmx_conn_t, mxk_zombie);
+ cfs_list_del_init(&conn->mxk_zombie);
+ spin_unlock(g_conn_lock);
- cfs_write_lock(g_lock);
- mxlnd_conn_free_locked(conn);
- cfs_write_unlock(g_lock);
+ write_lock(g_lock);
+ mxlnd_conn_free_locked(conn);
+ write_unlock(g_lock);
- count++;
- cfs_spin_lock(g_conn_lock);
- }
- cfs_spin_unlock(g_conn_lock);
- CDEBUG(D_NET, "%s: freed %d zombies\n", __func__, count);
- return count;
+ count++;
+ spin_lock(g_conn_lock);
+ }
+ spin_unlock(g_conn_lock);
+ CDEBUG(D_NET, "%s: freed %d zombies\n", __func__, count);
+ return count;
}
/**
while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
int ret = 0;
kmx_connparams_t *cp = NULL;
- cfs_spinlock_t *g_conn_lock = &kmxlnd_data.kmx_conn_lock;
- cfs_list_t *conn_reqs = &kmxlnd_data.kmx_conn_reqs;
+ spinlock_t *g_conn_lock = &kmxlnd_data.kmx_conn_lock;
+ cfs_list_t *conn_reqs = &kmxlnd_data.kmx_conn_reqs;
- ret = cfs_down_interruptible(&kmxlnd_data.kmx_conn_sem);
+ ret = down_interruptible(&kmxlnd_data.kmx_conn_sem);
if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown))
break;
ret = mxlnd_abort_msgs();
ret += mxlnd_free_conn_zombies();
- cfs_spin_lock(g_conn_lock);
- if (cfs_list_empty(conn_reqs)) {
- if (ret == 0)
- CNETERR("connd woke up but did not "
- "find a kmx_connparams_t or zombie conn\n");
- cfs_spin_unlock(g_conn_lock);
- continue;
- }
- cp = cfs_list_entry(conn_reqs->next, kmx_connparams_t,
- mxr_list);
- cfs_list_del_init(&cp->mxr_list);
- cfs_spin_unlock(g_conn_lock);
+ spin_lock(g_conn_lock);
+ if (cfs_list_empty(conn_reqs)) {
+ if (ret == 0)
+ CNETERR("connd woke up but did not find a "
+ "kmx_connparams_t or zombie conn\n");
+ spin_unlock(g_conn_lock);
+ continue;
+ }
+ cp = cfs_list_entry(conn_reqs->next, kmx_connparams_t,
+ mxr_list);
+ cfs_list_del_init(&cp->mxr_list);
+ spin_unlock(g_conn_lock);
switch (MXLND_MSG_TYPE(cp->mxr_match)) {
case MXLND_MSG_CONN_REQ:
kmx_peer_t *peer = NULL;
kmx_peer_t *temp = NULL;
kmx_conn_t *conn = NULL;
- cfs_rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
+ rwlock_t *g_lock = &kmxlnd_data.kmx_global_lock;
cfs_daemonize("mxlnd_timeoutd");
}
/* try to progress peers' txs */
- cfs_write_lock(g_lock);
+ write_lock(g_lock);
for (i = 0; i < MXLND_HASH_SIZE; i++) {
cfs_list_t *peers = &kmxlnd_data.kmx_peers[i];
cfs_time_after(now,
conn->mxk_last_tx +
CFS_HZ)) {
- cfs_write_unlock(g_lock);
- mxlnd_check_sends(peer);
- cfs_write_lock(g_lock);
- }
- mxlnd_conn_decref(conn); /* until here */
- mxlnd_peer_decref(peer); /* ...to here */
- }
- }
- cfs_write_unlock(g_lock);
+ write_unlock(g_lock);
+ mxlnd_check_sends(peer);
+ write_lock(g_lock);
+ }
+ mxlnd_conn_decref(conn); /* until here */
+ mxlnd_peer_decref(peer); /* ...to here */
+ }
+ }
+ write_unlock(g_lock);
mxlnd_sleep(delay);
}
CFS_INIT_LIST_HEAD(&peer->ibp_conns);
CFS_INIT_LIST_HEAD(&peer->ibp_tx_queue);
- cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
/* always called with a ref on ni, which prevents ni being shutdown */
LASSERT (net->ibn_shutdown == 0);
/* npeers only grows with the global lock held */
cfs_atomic_inc(&net->ibn_npeers);
- cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
*peerp = peer;
return 0;
int i;
unsigned long flags;
- cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
*nidp = peer->ibp_nid;
*count = cfs_atomic_read(&peer->ibp_refcount);
- cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
- flags);
- return 0;
- }
- }
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+ flags);
+ return 0;
+ }
+ }
- cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
return -ENOENT;
}
unsigned long flags;
int rc = -ENOENT;
- cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
if (nid != LNET_NID_ANY) {
lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
}
}
- cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
kiblnd_txlist_done(ni, &zombies, -EIO);
int i;
unsigned long flags;
- cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
cfs_list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {
conn = cfs_list_entry(ctmp, kib_conn_t,
ibc_list);
kiblnd_conn_addref(conn);
- cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
- flags);
- return conn;
- }
- }
- }
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+ flags);
+ return conn;
+ }
+ }
+ }
- cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
return NULL;
}
void
kiblnd_debug_conn (kib_conn_t *conn)
{
- cfs_list_t *tmp;
- int i;
+ cfs_list_t *tmp;
+ int i;
- cfs_spin_lock(&conn->ibc_lock);
+ spin_lock(&conn->ibc_lock);
CDEBUG(D_CONSOLE, "conn[%d] %p [version %x] -> %s: \n",
cfs_atomic_read(&conn->ibc_refcount), conn,
for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++)
kiblnd_debug_rx(&conn->ibc_rxs[i]);
- cfs_spin_unlock(&conn->ibc_lock);
+ spin_unlock(&conn->ibc_lock);
}
int
* she must dispose of 'cmid'. (Actually I'd block forever if I tried
* to destroy 'cmid' here since I'm called from the CM which still has
* its ref on 'cmid'). */
- cfs_rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
kib_net_t *net = peer->ibp_ni->ni_data;
kib_dev_t *dev = net->ibn_dev;
struct ib_qp_init_attr *init_qp_attr;
CFS_INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
CFS_INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
CFS_INIT_LIST_HEAD(&conn->ibc_active_txs);
- cfs_spin_lock_init(&conn->ibc_lock);
+ spin_lock_init(&conn->ibc_lock);
LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
sizeof(*conn->ibc_connvars));
- if (conn->ibc_connvars == NULL) {
- CERROR("Can't allocate in-progress connection state\n");
- goto failed_2;
- }
+ if (conn->ibc_connvars == NULL) {
+ CERROR("Can't allocate in-progress connection state\n");
+ goto failed_2;
+ }
- cfs_write_lock_irqsave(glock, flags);
- if (dev->ibd_failover) {
- cfs_write_unlock_irqrestore(glock, flags);
- CERROR("%s: failover in progress\n", dev->ibd_ifname);
- goto failed_2;
- }
+ write_lock_irqsave(glock, flags);
+ if (dev->ibd_failover) {
+ write_unlock_irqrestore(glock, flags);
+ CERROR("%s: failover in progress\n", dev->ibd_ifname);
+ goto failed_2;
+ }
if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
/* wakeup failover thread and teardown connection */
cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
}
- cfs_write_unlock_irqrestore(glock, flags);
+ write_unlock_irqrestore(glock, flags);
CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
cmid->device->name, dev->ibd_ifname);
goto failed_2;
kiblnd_setup_mtu_locked(cmid);
- cfs_write_unlock_irqrestore(glock, flags);
+ write_unlock_irqrestore(glock, flags);
LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
IBLND_RX_MSGS(version) * sizeof(kib_rx_t));
rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
IBLND_RX_MSG_PAGES(version));
- if (rc != 0)
- goto failed_2;
+ if (rc != 0)
+ goto failed_2;
- kiblnd_map_rx_descs(conn);
+ kiblnd_map_rx_descs(conn);
#ifdef HAVE_OFED_IB_COMP_VECTOR
cq = ib_create_cq(cmid->device,
/* correct # of posted buffers
* NB locking needed now I'm racing with completion */
- cfs_spin_lock_irqsave(&sched->ibs_lock, flags);
+ spin_lock_irqsave(&sched->ibs_lock, flags);
conn->ibc_nrx -= IBLND_RX_MSGS(version) - i;
- cfs_spin_unlock_irqrestore(&sched->ibs_lock, flags);
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
/* cmid will be destroyed by CM(ofed) after cm_callback
* returned, so we can't refer it anymore
unsigned long flags;
int count = 0;
- cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
if (nid != LNET_NID_ANY)
lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
}
}
- cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
/* wildcards always succeed */
if (nid == LNET_NID_ANY)
void
kiblnd_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
{
- cfs_time_t last_alive = 0;
- cfs_time_t now = cfs_time_current();
- cfs_rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_peer_t *peer;
- unsigned long flags;
+ cfs_time_t last_alive = 0;
+ cfs_time_t now = cfs_time_current();
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ kib_peer_t *peer;
+ unsigned long flags;
- cfs_read_lock_irqsave(glock, flags);
+ read_lock_irqsave(glock, flags);
peer = kiblnd_find_peer_locked(nid);
if (peer != NULL) {
last_alive = peer->ibp_last_alive;
}
- cfs_read_unlock_irqrestore(glock, flags);
+ read_unlock_irqrestore(glock, flags);
if (last_alive != 0)
*when = last_alive;
unsigned long flags;
int i = 0;
- cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- while (dev->ibd_failover) {
- cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- if (i++ % 50 == 0)
- CDEBUG(D_NET, "Wait for dev(%s) failover\n", dev->ibd_ifname);
- cfs_schedule_timeout(cfs_time_seconds(1) / 100);
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ while (dev->ibd_failover) {
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ if (i++ % 50 == 0)
+ CDEBUG(D_NET, "%s: Wait for failover\n",
+ dev->ibd_ifname);
+ cfs_schedule_timeout(cfs_time_seconds(1) / 100);
- cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- }
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ }
- kiblnd_hdev_addref_locked(dev->ibd_hdev);
- hdev = dev->ibd_hdev;
+ kiblnd_hdev_addref_locked(dev->ibd_hdev);
+ hdev = dev->ibd_hdev;
- cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- return hdev;
+ return hdev;
}
static void
if (fps->fps_net == NULL) /* intialized? */
return;
- cfs_spin_lock(&fps->fps_lock);
+ spin_lock(&fps->fps_lock);
while (!cfs_list_empty(&fps->fps_pool_list)) {
kib_fmr_pool_t *fpo = cfs_list_entry(fps->fps_pool_list.next,
cfs_list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
}
- cfs_spin_unlock(&fps->fps_lock);
+ spin_unlock(&fps->fps_lock);
}
static void
fps->fps_cpt = cpt;
fps->fps_pool_size = pool_size;
fps->fps_flush_trigger = flush_trigger;
- cfs_spin_lock_init(&fps->fps_lock);
+ spin_lock_init(&fps->fps_lock);
CFS_INIT_LIST_HEAD(&fps->fps_pool_list);
CFS_INIT_LIST_HEAD(&fps->fps_failed_pool_list);
fmr->fmr_pool = NULL;
fmr->fmr_pfmr = NULL;
- cfs_spin_lock(&fps->fps_lock);
+ spin_lock(&fps->fps_lock);
fpo->fpo_map_count --; /* decref the pool */
cfs_list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
fps->fps_version ++;
}
}
- cfs_spin_unlock(&fps->fps_lock);
+ spin_unlock(&fps->fps_lock);
if (!cfs_list_empty(&zombies))
kiblnd_destroy_fmr_pool_list(&zombies);
int rc;
again:
- cfs_spin_lock(&fps->fps_lock);
- version = fps->fps_version;
- cfs_list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
- fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- fpo->fpo_map_count ++;
- cfs_spin_unlock(&fps->fps_lock);
+ spin_lock(&fps->fps_lock);
+ version = fps->fps_version;
+ cfs_list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
+ fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ fpo->fpo_map_count++;
+ spin_unlock(&fps->fps_lock);
pfmr = ib_fmr_pool_map_phys(fpo->fpo_fmr_pool,
pages, npages, iov);
return 0;
}
- cfs_spin_lock(&fps->fps_lock);
- fpo->fpo_map_count --;
- if (PTR_ERR(pfmr) != -EAGAIN) {
- cfs_spin_unlock(&fps->fps_lock);
- return PTR_ERR(pfmr);
- }
+ spin_lock(&fps->fps_lock);
+ fpo->fpo_map_count--;
+ if (PTR_ERR(pfmr) != -EAGAIN) {
+ spin_unlock(&fps->fps_lock);
+ return PTR_ERR(pfmr);
+ }
- /* EAGAIN and ... */
- if (version != fps->fps_version) {
- cfs_spin_unlock(&fps->fps_lock);
- goto again;
- }
- }
+ /* EAGAIN and ... */
+ if (version != fps->fps_version) {
+ spin_unlock(&fps->fps_lock);
+ goto again;
+ }
+ }
- if (fps->fps_increasing) {
- cfs_spin_unlock(&fps->fps_lock);
- CDEBUG(D_NET, "Another thread is allocating new "
- "FMR pool, waiting for her to complete\n");
- cfs_schedule();
- goto again;
+ if (fps->fps_increasing) {
+ spin_unlock(&fps->fps_lock);
+ CDEBUG(D_NET, "Another thread is allocating new "
+ "FMR pool, waiting for her to complete\n");
+ cfs_schedule();
+ goto again;
- }
+ }
- if (cfs_time_before(cfs_time_current(), fps->fps_next_retry)) {
- /* someone failed recently */
- cfs_spin_unlock(&fps->fps_lock);
- return -EAGAIN;
- }
+ if (cfs_time_before(cfs_time_current(), fps->fps_next_retry)) {
+ /* someone failed recently */
+ spin_unlock(&fps->fps_lock);
+ return -EAGAIN;
+ }
- fps->fps_increasing = 1;
- cfs_spin_unlock(&fps->fps_lock);
+ fps->fps_increasing = 1;
+ spin_unlock(&fps->fps_lock);
- CDEBUG(D_NET, "Allocate new FMR pool\n");
- rc = kiblnd_create_fmr_pool(fps, &fpo);
- cfs_spin_lock(&fps->fps_lock);
- fps->fps_increasing = 0;
- if (rc == 0) {
- fps->fps_version ++;
- cfs_list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
- } else {
- fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
- }
- cfs_spin_unlock(&fps->fps_lock);
+ CDEBUG(D_NET, "Allocate new FMR pool\n");
+ rc = kiblnd_create_fmr_pool(fps, &fpo);
+ spin_lock(&fps->fps_lock);
+ fps->fps_increasing = 0;
+ if (rc == 0) {
+ fps->fps_version++;
+ cfs_list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
+ } else {
+ fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
+ }
+ spin_unlock(&fps->fps_lock);
- goto again;
+ goto again;
}
static void
if (ps->ps_net == NULL) /* intialized? */
return;
- cfs_spin_lock(&ps->ps_lock);
+ spin_lock(&ps->ps_lock);
while (!cfs_list_empty(&ps->ps_pool_list)) {
kib_pool_t *po = cfs_list_entry(ps->ps_pool_list.next,
kib_pool_t, po_list);
else
cfs_list_add(&po->po_list, &ps->ps_failed_pool_list);
}
- cfs_spin_unlock(&ps->ps_lock);
+ spin_unlock(&ps->ps_lock);
}
static void
ps->ps_node_fini = nd_fini;
ps->ps_pool_size = size;
strncpy(ps->ps_name, name, IBLND_POOL_NAME_LEN);
- cfs_spin_lock_init(&ps->ps_lock);
+ spin_lock_init(&ps->ps_lock);
CFS_INIT_LIST_HEAD(&ps->ps_pool_list);
CFS_INIT_LIST_HEAD(&ps->ps_failed_pool_list);
kib_pool_t *tmp;
cfs_time_t now = cfs_time_current();
- cfs_spin_lock(&ps->ps_lock);
+ spin_lock(&ps->ps_lock);
if (ps->ps_node_fini != NULL)
ps->ps_node_fini(pool, node);
if (kiblnd_pool_is_idle(pool, now))
cfs_list_move(&pool->po_list, &zombies);
}
- cfs_spin_unlock(&ps->ps_lock);
+ spin_unlock(&ps->ps_lock);
- if (!cfs_list_empty(&zombies))
- kiblnd_destroy_pool_list(&zombies);
+ if (!cfs_list_empty(&zombies))
+ kiblnd_destroy_pool_list(&zombies);
}
cfs_list_t *
int rc;
again:
- cfs_spin_lock(&ps->ps_lock);
+ spin_lock(&ps->ps_lock);
cfs_list_for_each_entry(pool, &ps->ps_pool_list, po_list) {
if (cfs_list_empty(&pool->po_free_list))
continue;
/* still hold the lock */
ps->ps_node_init(pool, node);
}
- cfs_spin_unlock(&ps->ps_lock);
- return node;
- }
+ spin_unlock(&ps->ps_lock);
+ return node;
+ }
- /* no available tx pool and ... */
- if (ps->ps_increasing) {
- /* another thread is allocating a new pool */
- cfs_spin_unlock(&ps->ps_lock);
+ /* no available tx pool and ... */
+ if (ps->ps_increasing) {
+ /* another thread is allocating a new pool */
+ spin_unlock(&ps->ps_lock);
CDEBUG(D_NET, "Another thread is allocating new "
"%s pool, waiting for her to complete\n",
ps->ps_name);
goto again;
}
- if (cfs_time_before(cfs_time_current(), ps->ps_next_retry)) {
- /* someone failed recently */
- cfs_spin_unlock(&ps->ps_lock);
- return NULL;
- }
+ if (cfs_time_before(cfs_time_current(), ps->ps_next_retry)) {
+ /* someone failed recently */
+ spin_unlock(&ps->ps_lock);
+ return NULL;
+ }
- ps->ps_increasing = 1;
- cfs_spin_unlock(&ps->ps_lock);
+ ps->ps_increasing = 1;
+ spin_unlock(&ps->ps_lock);
- CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
+ CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
- rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
+ rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
- cfs_spin_lock(&ps->ps_lock);
+ spin_lock(&ps->ps_lock);
ps->ps_increasing = 0;
if (rc == 0) {
cfs_list_add_tail(&pool->po_list, &ps->ps_pool_list);
CERROR("Can't allocate new %s pool because out of memory\n",
ps->ps_name);
}
- cfs_spin_unlock(&ps->ps_lock);
+ spin_unlock(&ps->ps_lock);
- goto again;
+ goto again;
}
void
int rc;
int i;
- cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
if (*kiblnd_tunables.kib_map_on_demand == 0 &&
net->ibn_dev->ibd_hdev->ibh_nmrs == 1) {
- cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
flags);
goto create_tx_pool;
}
- cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
if (*kiblnd_tunables.kib_fmr_pool_size <
*kiblnd_tunables.kib_ntx / 4) {
* because we can fail to create new listener.
* But we have to close it now, otherwise rdma_bind_addr
* will return EADDRINUSE... How crap! */
- cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- cmid = dev->ibd_hdev->ibh_cmid;
- /* make next schedule of kiblnd_dev_need_failover
- * will return 1 for me */
- dev->ibd_hdev->ibh_cmid = NULL;
- cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ cmid = dev->ibd_hdev->ibh_cmid;
+ /* make the next call to kiblnd_dev_need_failover()
+ * return 1 for me */
+ dev->ibd_hdev->ibh_cmid = NULL;
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
rdma_destroy_id(cmid);
}
goto out;
}
- cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
old = dev->ibd_hdev;
dev->ibd_hdev = hdev; /* take over the refcount */
}
}
- cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
out:
if (!cfs_list_empty(&zombie_tpo))
kiblnd_destroy_pool_list(&zombie_tpo);
kiblnd_shutdown (lnet_ni_t *ni)
{
kib_net_t *net = ni->ni_data;
- cfs_rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
int i;
unsigned long flags;
CDEBUG(D_MALLOC, "before LND net cleanup: kmem %d\n",
cfs_atomic_read(&libcfs_kmemory));
- cfs_write_lock_irqsave(g_lock, flags);
- net->ibn_shutdown = 1;
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_lock_irqsave(g_lock, flags);
+ net->ibn_shutdown = 1;
+ write_unlock_irqrestore(g_lock, flags);
switch (net->ibn_init) {
default:
kiblnd_net_fini_pools(net);
- cfs_write_lock_irqsave(g_lock, flags);
- LASSERT (net->ibn_dev->ibd_nnets > 0);
- net->ibn_dev->ibd_nnets--;
- cfs_list_del(&net->ibn_list);
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_lock_irqsave(g_lock, flags);
+ LASSERT(net->ibn_dev->ibd_nnets > 0);
+ net->ibn_dev->ibd_nnets--;
+ cfs_list_del(&net->ibn_list);
+ write_unlock_irqrestore(g_lock, flags);
/* fall through */
PORTAL_MODULE_USE;
memset(&kiblnd_data, 0, sizeof(kiblnd_data)); /* zero pointers, flags etc */
- cfs_rwlock_init(&kiblnd_data.kib_global_lock);
+ rwlock_init(&kiblnd_data.kib_global_lock);
CFS_INIT_LIST_HEAD(&kiblnd_data.kib_devs);
CFS_INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs);
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
CFS_INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
- cfs_spin_lock_init(&kiblnd_data.kib_connd_lock);
+ spin_lock_init(&kiblnd_data.kib_connd_lock);
CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
cfs_waitq_init(&kiblnd_data.kib_connd_waitq);
cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
int nthrs;
- cfs_spin_lock_init(&sched->ibs_lock);
+ spin_lock_init(&sched->ibs_lock);
CFS_INIT_LIST_HEAD(&sched->ibs_conns);
cfs_waitq_init(&sched->ibs_waitq);
goto failed;
}
- cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- ibdev->ibd_nnets++;
- cfs_list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
- cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ ibdev->ibd_nnets++;
+ cfs_list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
net->ibn_init = IBLND_INIT_ALL;
typedef struct kib_poolset
{
- cfs_spinlock_t ps_lock; /* serialize */
+ spinlock_t ps_lock; /* serialize */
struct kib_net *ps_net; /* network it belongs to */
char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
cfs_list_t ps_pool_list; /* list of pools */
typedef struct
{
- cfs_spinlock_t fps_lock; /* serialize */
+ spinlock_t fps_lock; /* serialize */
struct kib_net *fps_net; /* IB network */
cfs_list_t fps_pool_list; /* FMR pool list */
cfs_list_t fps_failed_pool_list; /* FMR pool list */
struct kib_sched_info {
/* serialise */
- cfs_spinlock_t ibs_lock;
+ spinlock_t ibs_lock;
/* schedulers sleep here */
cfs_waitq_t ibs_waitq;
/* conns to check for rx completions */
cfs_waitq_t kib_failover_waitq;
cfs_atomic_t kib_nthreads; /* # live threads */
/* stabilize net/dev/peer/conn ops */
- cfs_rwlock_t kib_global_lock;
+ rwlock_t kib_global_lock;
/* hash table of all my known peers */
cfs_list_t *kib_peers;
/* size of kib_peers */
cfs_list_t kib_connd_zombies;
/* connection daemon sleeps here */
cfs_waitq_t kib_connd_waitq;
- cfs_spinlock_t kib_connd_lock; /* serialise */
+ spinlock_t kib_connd_lock; /* serialise */
struct ib_qp_attr kib_error_qpa; /* QP->ERROR */
/* percpt data for schedulers */
struct kib_sched_info **kib_scheds;
cfs_list_t ibc_tx_queue_nocred;/* sends that don't need a credit */
cfs_list_t ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */
cfs_list_t ibc_active_txs; /* active tx awaiting completion */
- cfs_spinlock_t ibc_lock; /* serialise */
+ spinlock_t ibc_lock; /* serialise */
kib_rx_t *ibc_rxs; /* the rx descs */
kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */
cfs_atomic_inc(&(conn)->ibc_refcount); \
} while (0)
-#define kiblnd_conn_decref(conn) \
-do { \
- unsigned long flags; \
- \
- CDEBUG(D_NET, "conn[%p] (%d)--\n", \
- (conn), cfs_atomic_read(&(conn)->ibc_refcount)); \
- LASSERT_ATOMIC_POS(&(conn)->ibc_refcount); \
- if (cfs_atomic_dec_and_test(&(conn)->ibc_refcount)) { \
- cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \
- cfs_list_add_tail(&(conn)->ibc_list, \
- &kiblnd_data.kib_connd_zombies); \
- cfs_waitq_signal(&kiblnd_data.kib_connd_waitq); \
- cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
- } \
+#define kiblnd_conn_decref(conn) \
+do { \
+ unsigned long flags; \
+ \
+ CDEBUG(D_NET, "conn[%p] (%d)--\n", \
+ (conn), cfs_atomic_read(&(conn)->ibc_refcount)); \
+ LASSERT_ATOMIC_POS(&(conn)->ibc_refcount); \
+ if (cfs_atomic_dec_and_test(&(conn)->ibc_refcount)) { \
+ spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \
+ cfs_list_add_tail(&(conn)->ibc_list, \
+ &kiblnd_data.kib_connd_zombies); \
+ cfs_waitq_signal(&kiblnd_data.kib_connd_waitq); \
+ spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
+ } \
} while (0)
#define kiblnd_peer_addref(peer) \
struct kib_sched_info *sched = conn->ibc_sched;
unsigned long flags;
- cfs_spin_lock_irqsave(&sched->ibs_lock, flags);
+ spin_lock_irqsave(&sched->ibs_lock, flags);
LASSERT(conn->ibc_nrx > 0);
conn->ibc_nrx--;
- cfs_spin_unlock_irqrestore(&sched->ibs_lock, flags);
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
kiblnd_conn_decref(conn);
}
if (credit == IBLND_POSTRX_NO_CREDIT)
return 0;
- cfs_spin_lock(&conn->ibc_lock);
- if (credit == IBLND_POSTRX_PEER_CREDIT)
- conn->ibc_outstanding_credits++;
- else
- conn->ibc_reserved_credits++;
- cfs_spin_unlock(&conn->ibc_lock);
+ spin_lock(&conn->ibc_lock);
+ if (credit == IBLND_POSTRX_PEER_CREDIT)
+ conn->ibc_outstanding_credits++;
+ else
+ conn->ibc_reserved_credits++;
+ spin_unlock(&conn->ibc_lock);
- kiblnd_check_sends(conn);
- return 0;
+ kiblnd_check_sends(conn);
+ return 0;
}
kib_tx_t *
void
kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
{
- kib_tx_t *tx;
- lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
- int idle;
+ kib_tx_t *tx;
+ lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+ int idle;
- cfs_spin_lock(&conn->ibc_lock);
+ spin_lock(&conn->ibc_lock);
- tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
- if (tx == NULL) {
- cfs_spin_unlock(&conn->ibc_lock);
+ tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
+ if (tx == NULL) {
+ spin_unlock(&conn->ibc_lock);
CWARN("Unmatched completion type %x cookie "LPX64" from %s\n",
txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
if (idle)
cfs_list_del(&tx->tx_list);
- cfs_spin_unlock(&conn->ibc_lock);
+ spin_unlock(&conn->ibc_lock);
- if (idle)
- kiblnd_tx_done(ni, tx);
+ if (idle)
+ kiblnd_tx_done(ni, tx);
}
void
if (credits != 0) {
/* Have I received credits that will let me send? */
- cfs_spin_lock(&conn->ibc_lock);
+ spin_lock(&conn->ibc_lock);
- if (conn->ibc_credits + credits >
- IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) {
- rc2 = conn->ibc_credits;
- cfs_spin_unlock(&conn->ibc_lock);
+ if (conn->ibc_credits + credits >
+ IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) {
+ rc2 = conn->ibc_credits;
+ spin_unlock(&conn->ibc_lock);
CERROR("Bad credits from %s: %d + %d > %d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid),
!IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */
conn->ibc_outstanding_credits++;
- cfs_spin_unlock(&conn->ibc_lock);
+ spin_unlock(&conn->ibc_lock);
kiblnd_check_sends(conn);
}
case IBLND_MSG_PUT_ACK:
post_credit = IBLND_POSTRX_RSRVD_CREDIT;
- cfs_spin_lock(&conn->ibc_lock);
- tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
- msg->ibm_u.putack.ibpam_src_cookie);
- if (tx != NULL)
- cfs_list_del(&tx->tx_list);
- cfs_spin_unlock(&conn->ibc_lock);
+ spin_lock(&conn->ibc_lock);
+ tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
+ msg->ibm_u.putack.ibpam_src_cookie);
+ if (tx != NULL)
+ cfs_list_del(&tx->tx_list);
+ spin_unlock(&conn->ibc_lock);
if (tx == NULL) {
CERROR("Unmatched PUT_ACK from %s\n",
CERROR("Can't setup rdma for PUT to %s: %d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2);
- cfs_spin_lock(&conn->ibc_lock);
- tx->tx_waiting = 0; /* clear waiting and queue atomically */
- kiblnd_queue_tx_locked(tx, conn);
- cfs_spin_unlock(&conn->ibc_lock);
- break;
+ spin_lock(&conn->ibc_lock);
+ tx->tx_waiting = 0; /* clear waiting and queue atomically */
+ kiblnd_queue_tx_locked(tx, conn);
+ spin_unlock(&conn->ibc_lock);
+ break;
case IBLND_MSG_PUT_DONE:
post_credit = IBLND_POSTRX_PEER_CREDIT;
/* racing with connection establishment/teardown! */
if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
- cfs_rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
- unsigned long flags;
-
- cfs_write_lock_irqsave(g_lock, flags);
- /* must check holding global lock to eliminate race */
- if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
- cfs_list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
- cfs_write_unlock_irqrestore(g_lock, flags);
- return;
- }
- cfs_write_unlock_irqrestore(g_lock, flags);
+ rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ unsigned long flags;
+
+ write_lock_irqsave(g_lock, flags);
+ /* must check holding global lock to eliminate race */
+ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
+ cfs_list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
+ write_unlock_irqrestore(g_lock, flags);
+ return;
+ }
+ write_unlock_irqrestore(g_lock, flags);
}
kiblnd_handle_rx(rx);
return;
/* OK to drop when posted enough NOOPs, since
* kiblnd_check_sends will queue NOOP again when
* posted NOOPs complete */
- cfs_spin_unlock(&conn->ibc_lock);
- kiblnd_tx_done(peer->ibp_ni, tx);
- cfs_spin_lock(&conn->ibc_lock);
+ spin_unlock(&conn->ibc_lock);
+ kiblnd_tx_done(peer->ibp_ni, tx);
+ spin_lock(&conn->ibc_lock);
CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
libcfs_nid2str(peer->ibp_nid),
conn->ibc_noops_posted);
if (done)
cfs_list_del(&tx->tx_list);
- cfs_spin_unlock(&conn->ibc_lock);
+ spin_unlock(&conn->ibc_lock);
if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
CERROR("Error %d posting transmit to %s\n",
if (done)
kiblnd_tx_done(peer->ibp_ni, tx);
- cfs_spin_lock(&conn->ibc_lock);
+ spin_lock(&conn->ibc_lock);
- return -EIO;
+ return -EIO;
}
void
return;
}
- cfs_spin_lock(&conn->ibc_lock);
+ spin_lock(&conn->ibc_lock);
LASSERT (conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver));
LASSERT (!IBLND_OOB_CAPABLE(ver) ||
}
if (kiblnd_need_noop(conn)) {
- cfs_spin_unlock(&conn->ibc_lock);
+ spin_unlock(&conn->ibc_lock);
tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
- if (tx != NULL)
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);
+ if (tx != NULL)
+ kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);
- cfs_spin_lock(&conn->ibc_lock);
+ spin_lock(&conn->ibc_lock);
if (tx != NULL)
kiblnd_queue_tx_locked(tx, conn);
}
break;
}
- cfs_spin_unlock(&conn->ibc_lock);
+ spin_unlock(&conn->ibc_lock);
- kiblnd_conn_decref(conn); /* ...until here */
+ kiblnd_conn_decref(conn); /* ...until here */
}
void
kiblnd_peer_alive(conn->ibc_peer);
}
- cfs_spin_lock(&conn->ibc_lock);
+ spin_lock(&conn->ibc_lock);
/* I could be racing with rdma completion. Whoever makes 'tx' idle
* gets to free it, which also drops its ref on 'conn'. */
kiblnd_conn_addref(conn); /* 1 ref for me.... */
- cfs_spin_unlock(&conn->ibc_lock);
+ spin_unlock(&conn->ibc_lock);
if (idle)
kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx);
void
kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn)
{
- cfs_spin_lock(&conn->ibc_lock);
- kiblnd_queue_tx_locked(tx, conn);
- cfs_spin_unlock(&conn->ibc_lock);
+ spin_lock(&conn->ibc_lock);
+ kiblnd_queue_tx_locked(tx, conn);
+ spin_unlock(&conn->ibc_lock);
- kiblnd_check_sends(conn);
+ kiblnd_check_sends(conn);
}
static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
kib_peer_t *peer;
kib_peer_t *peer2;
kib_conn_t *conn;
- cfs_rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
unsigned long flags;
int rc;
/* First time, just use a read lock since I expect to find my peer
* connected */
- cfs_read_lock_irqsave(g_lock, flags);
+ read_lock_irqsave(g_lock, flags);
peer = kiblnd_find_peer_locked(nid);
if (peer != NULL && !cfs_list_empty(&peer->ibp_conns)) {
conn = kiblnd_get_conn_locked(peer);
kiblnd_conn_addref(conn); /* 1 ref for me... */
- cfs_read_unlock_irqrestore(g_lock, flags);
+ read_unlock_irqrestore(g_lock, flags);
if (tx != NULL)
kiblnd_queue_tx(tx, conn);
return;
}
- cfs_read_unlock(g_lock);
- /* Re-try with a write lock */
- cfs_write_lock(g_lock);
+ read_unlock(g_lock);
+ /* Re-try with a write lock */
+ write_lock(g_lock);
peer = kiblnd_find_peer_locked(nid);
if (peer != NULL) {
if (tx != NULL)
cfs_list_add_tail(&tx->tx_list,
&peer->ibp_tx_queue);
- cfs_write_unlock_irqrestore(g_lock, flags);
- } else {
- conn = kiblnd_get_conn_locked(peer);
- kiblnd_conn_addref(conn); /* 1 ref for me... */
+ write_unlock_irqrestore(g_lock, flags);
+ } else {
+ conn = kiblnd_get_conn_locked(peer);
+ kiblnd_conn_addref(conn); /* 1 ref for me... */
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
if (tx != NULL)
kiblnd_queue_tx(tx, conn);
return;
}
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
/* Allocate a peer ready to add to the peer table and retry */
rc = kiblnd_create_peer(ni, &peer, nid);
return;
}
- cfs_write_lock_irqsave(g_lock, flags);
+ write_lock_irqsave(g_lock, flags);
peer2 = kiblnd_find_peer_locked(nid);
if (peer2 != NULL) {
if (tx != NULL)
cfs_list_add_tail(&tx->tx_list,
&peer2->ibp_tx_queue);
- cfs_write_unlock_irqrestore(g_lock, flags);
- } else {
- conn = kiblnd_get_conn_locked(peer2);
- kiblnd_conn_addref(conn); /* 1 ref for me... */
+ write_unlock_irqrestore(g_lock, flags);
+ } else {
+ conn = kiblnd_get_conn_locked(peer2);
+ kiblnd_conn_addref(conn); /* 1 ref for me... */
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
if (tx != NULL)
kiblnd_queue_tx(tx, conn);
kiblnd_peer_addref(peer);
cfs_list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
kiblnd_connect_peer(peer);
kiblnd_peer_decref(peer);
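/*
 * Illustrative sketch only -- not part of the patch.  It condenses the
 * peer-lookup idiom above into the native rwlock API: take the read lock
 * for the expected fast path, and because Linux rwlocks cannot be
 * upgraded in place, drop it and retake the lock as a writer (re-checking
 * the lookup) before modifying the table.  All names (my_table_lock,
 * my_table_key, my_find, my_insert) are hypothetical.
 */
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RWLOCK(my_table_lock);
static int my_table_key = -1;			/* toy one-entry "table" */

static bool my_find(int key)   { return my_table_key == key; }
static void my_insert(int key) { my_table_key = key; }

static void my_lookup_or_insert(int key)
{
	unsigned long flags;

	read_lock_irqsave(&my_table_lock, flags);	/* fast path */
	if (my_find(key)) {
		read_unlock_irqrestore(&my_table_lock, flags);
		return;
	}
	read_unlock_irqrestore(&my_table_lock, flags);

	write_lock_irqsave(&my_table_lock, flags);	/* slow path */
	if (!my_find(key))				/* re-check after re-locking */
		my_insert(key);
	write_unlock_irqrestore(&my_table_lock, flags);
}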
cfs_time_t last_alive = 0;
unsigned long flags;
- cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
if (cfs_list_empty(&peer->ibp_conns) &&
peer->ibp_accepting == 0 &&
last_alive = peer->ibp_last_alive;
}
- cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
if (error != 0)
lnet_notify(peer->ibp_ni,
cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
}
- cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
- cfs_list_add_tail (&conn->ibc_list, &kiblnd_data.kib_connd_conns);
- cfs_waitq_signal (&kiblnd_data.kib_connd_waitq);
+ cfs_list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
+ cfs_waitq_signal(&kiblnd_data.kib_connd_waitq);
- cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+ spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
}
void
-kiblnd_close_conn (kib_conn_t *conn, int error)
+kiblnd_close_conn(kib_conn_t *conn, int error)
{
- unsigned long flags;
+ unsigned long flags;
- cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- kiblnd_close_conn_locked(conn, error);
+ kiblnd_close_conn_locked(conn, error);
- cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}
void
kiblnd_handle_early_rxs(kib_conn_t *conn)
{
- unsigned long flags;
- kib_rx_t *rx;
+ unsigned long flags;
+ kib_rx_t *rx;
- LASSERT (!cfs_in_interrupt());
- LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+ LASSERT(!cfs_in_interrupt());
+ LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
- cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- while (!cfs_list_empty(&conn->ibc_early_rxs)) {
- rx = cfs_list_entry(conn->ibc_early_rxs.next,
- kib_rx_t, rx_list);
- cfs_list_del(&rx->rx_list);
- cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
- flags);
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ while (!cfs_list_empty(&conn->ibc_early_rxs)) {
+ rx = cfs_list_entry(conn->ibc_early_rxs.next,
+ kib_rx_t, rx_list);
+ cfs_list_del(&rx->rx_list);
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- kiblnd_handle_rx(rx);
+ kiblnd_handle_rx(rx);
- cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
}
- cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}
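/*
 * Illustrative sketch only -- not part of the patch.  It shows the drain
 * idiom used by kiblnd_handle_early_rxs() above with the native API: pop
 * one item at a time under the writer lock, drop the lock for the handler
 * (which may sleep or take the same lock), then retake it before looking
 * at the list again.  All names (my_item, my_global_lock, my_drain) are
 * hypothetical.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct my_item { struct list_head link; };

static DEFINE_RWLOCK(my_global_lock);

static void my_drain(struct list_head *queue,
		     void (*handler)(struct my_item *))
{
	unsigned long flags;
	struct my_item *item;

	write_lock_irqsave(&my_global_lock, flags);
	while (!list_empty(queue)) {
		item = list_entry(queue->next, struct my_item, link);
		list_del(&item->link);
		write_unlock_irqrestore(&my_global_lock, flags);

		handler(item);			/* runs unlocked */

		write_lock_irqsave(&my_global_lock, flags);
	}
	write_unlock_irqrestore(&my_global_lock, flags);
}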
void
cfs_list_t *nxt;
kib_tx_t *tx;
- cfs_spin_lock(&conn->ibc_lock);
+ spin_lock(&conn->ibc_lock);
cfs_list_for_each_safe (tmp, nxt, txs) {
tx = cfs_list_entry (tmp, kib_tx_t, tx_list);
}
}
- cfs_spin_unlock(&conn->ibc_lock);
+ spin_unlock(&conn->ibc_lock);
- kiblnd_txlist_done(conn->ibc_peer->ibp_ni,
- &zombies, -ECONNABORTED);
+ kiblnd_txlist_done(conn->ibc_peer->ibp_ni, &zombies, -ECONNABORTED);
}
void
LASSERT (error != 0);
LASSERT (!cfs_in_interrupt());
- cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
if (active) {
LASSERT (peer->ibp_connecting > 0);
if (peer->ibp_connecting != 0 ||
peer->ibp_accepting != 0) {
/* another connection attempt under way... */
- cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
flags);
return;
}
LASSERT (cfs_list_empty(&peer->ibp_tx_queue));
}
- cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
kiblnd_peer_notify(peer);
}
/* connection established */
- cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
conn->ibc_last_send = jiffies;
kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
/* start to shut down connection */
kiblnd_close_conn_locked(conn, -ECONNABORTED);
- cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
- flags);
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- kiblnd_txlist_done(ni, &txs, -ECONNABORTED);
+ kiblnd_txlist_done(ni, &txs, -ECONNABORTED);
- return;
- }
+ return;
+ }
- cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- /* Schedule blocked txs */
- cfs_spin_lock (&conn->ibc_lock);
- while (!cfs_list_empty (&txs)) {
- tx = cfs_list_entry (txs.next, kib_tx_t, tx_list);
- cfs_list_del(&tx->tx_list);
+ /* Schedule blocked txs */
+ spin_lock(&conn->ibc_lock);
+ while (!cfs_list_empty(&txs)) {
+ tx = cfs_list_entry(txs.next, kib_tx_t, tx_list);
+ cfs_list_del(&tx->tx_list);
- kiblnd_queue_tx_locked(tx, conn);
- }
- cfs_spin_unlock (&conn->ibc_lock);
+ kiblnd_queue_tx_locked(tx, conn);
+ }
+ spin_unlock(&conn->ibc_lock);
- kiblnd_check_sends(conn);
+ kiblnd_check_sends(conn);
- /* schedule blocked rxs */
- kiblnd_handle_early_rxs(conn);
+ /* schedule blocked rxs */
+ kiblnd_handle_early_rxs(conn);
}
void
int
kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
{
- cfs_rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
kib_msg_t *reqmsg = priv;
kib_msg_t *ackmsg;
kib_dev_t *ibdev;
goto failed;
}
- cfs_write_lock_irqsave(g_lock, flags);
+ write_lock_irqsave(g_lock, flags);
peer2 = kiblnd_find_peer_locked(nid);
if (peer2 != NULL) {
if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
peer2->ibp_version != version) {
kiblnd_close_peer_conns_locked(peer2, -ESTALE);
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
CWARN("Conn stale %s [old ver: %x, new ver: %x]\n",
libcfs_nid2str(nid), peer2->ibp_version, version);
/* tie-break connection race in favour of the higher NID */
if (peer2->ibp_connecting != 0 &&
nid < ni->ni_nid) {
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
CWARN("Conn race %s\n", libcfs_nid2str(peer2->ibp_nid));
peer2->ibp_accepting++;
kiblnd_peer_addref(peer2);
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
kiblnd_peer_decref(peer);
peer = peer2;
} else {
kiblnd_peer_addref(peer);
cfs_list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
}
conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version);
LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
LASSERT (peer->ibp_connecting > 0); /* 'conn' at least */
- cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
/* retry connection if it's still needed and no other connection
* attempts (active or passive) are in progress
peer->ibp_incarnation = incarnation;
}
- cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
if (!retry)
return;
goto failed;
}
- cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (msg->ibm_dstnid == ni->ni_nid &&
- msg->ibm_dststamp == net->ibn_incarnation)
- rc = 0;
- else
- rc = -ESTALE;
- cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ if (msg->ibm_dstnid == ni->ni_nid &&
+ msg->ibm_dststamp == net->ibn_incarnation)
+ rc = 0;
+ else
+ rc = -ESTALE;
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
if (rc != 0) {
CERROR("Bad connection reply from %s, rc = %d, "
unsigned long flags;
int rc;
- cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- incarnation = peer->ibp_incarnation;
- version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION : peer->ibp_version;
+ incarnation = peer->ibp_incarnation;
+ version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION :
+ peer->ibp_version;
- cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version);
if (conn == NULL) {
/* NB. We expect to have a look at all the peers and not find any
* RDMAs to time out, so we just use a shared lock while we
* take a look... */
- cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
cfs_list_for_each (ptmp, peers) {
peer = cfs_list_entry (ptmp, kib_peer_t, ibp_list);
LASSERT (conn->ibc_state == IBLND_CONN_ESTABLISHED);
- cfs_spin_lock(&conn->ibc_lock);
+ spin_lock(&conn->ibc_lock);
- sendnoop = kiblnd_need_noop(conn);
- timedout = kiblnd_conn_timed_out_locked(conn);
- if (!sendnoop && !timedout) {
- cfs_spin_unlock(&conn->ibc_lock);
+ sendnoop = kiblnd_need_noop(conn);
+ timedout = kiblnd_conn_timed_out_locked(conn);
+ if (!sendnoop && !timedout) {
+ spin_unlock(&conn->ibc_lock);
continue;
}
/* +ref for 'closes' or 'checksends' */
kiblnd_conn_addref(conn);
- cfs_spin_unlock(&conn->ibc_lock);
- }
- }
+ spin_unlock(&conn->ibc_lock);
+ }
+ }
- cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
/* Handle timeout by closing the whole
* connection. We can only be sure RDMA activity
cfs_waitlink_init (&wait);
kiblnd_data.kib_connd = current;
- cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
while (!kiblnd_data.kib_shutdown) {
kib_conn_t, ibc_list);
cfs_list_del(&conn->ibc_list);
- cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock,
- flags);
- dropped_lock = 1;
+ spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
+ flags);
+ dropped_lock = 1;
- kiblnd_destroy_conn(conn);
+ kiblnd_destroy_conn(conn);
- cfs_spin_lock_irqsave (&kiblnd_data.kib_connd_lock,
- flags);
- }
+ spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ }
- if (!cfs_list_empty (&kiblnd_data.kib_connd_conns)) {
- conn = cfs_list_entry (kiblnd_data.kib_connd_conns.next,
- kib_conn_t, ibc_list);
- cfs_list_del(&conn->ibc_list);
+ if (!cfs_list_empty(&kiblnd_data.kib_connd_conns)) {
+ conn = cfs_list_entry(kiblnd_data.kib_connd_conns.next,
+ kib_conn_t, ibc_list);
+ cfs_list_del(&conn->ibc_list);
- cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock,
- flags);
- dropped_lock = 1;
+ spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
+ flags);
+ dropped_lock = 1;
- kiblnd_disconnect_conn(conn);
- kiblnd_conn_decref(conn);
+ kiblnd_disconnect_conn(conn);
+ kiblnd_conn_decref(conn);
- cfs_spin_lock_irqsave (&kiblnd_data.kib_connd_lock,
- flags);
+ spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
}
/* careful with the jiffy wrap... */
const int p = 1;
int chunk = kiblnd_data.kib_peer_hash_size;
- cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+ spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
dropped_lock = 1;
/* Time to check for RDMA timeouts on a few more
}
deadline += p * CFS_HZ;
- cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock,
- flags);
- }
+ spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ }
- if (dropped_lock)
- continue;
+ if (dropped_lock)
+ continue;
- /* Nothing to do for 'timeout' */
- cfs_set_current_state (CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add (&kiblnd_data.kib_connd_waitq, &wait);
- cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
+ /* Nothing to do for 'timeout' */
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+ cfs_waitq_add(&kiblnd_data.kib_connd_waitq, &wait);
+ spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
- cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
+ cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
- cfs_set_current_state (CFS_TASK_RUNNING);
- cfs_waitq_del (&kiblnd_data.kib_connd_waitq, &wait);
- cfs_spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
- }
+ cfs_set_current_state(CFS_TASK_RUNNING);
+ cfs_waitq_del(&kiblnd_data.kib_connd_waitq, &wait);
+ spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ }
- cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
+ spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
- kiblnd_thread_fini();
- return (0);
+ kiblnd_thread_fini();
+ return 0;
}
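/*
 * Illustrative sketch only -- not part of the patch.  It mirrors the
 * connd-style service loop above in the native API: hold the work-queue
 * spinlock just long enough to pop one item, remember that the lock was
 * dropped, and only sleep once a full pass found nothing to do.  All
 * names (my_work, my_lock, my_queue, my_waitq, my_shutdown) are
 * hypothetical.
 */
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct my_work { struct list_head link; };

static DEFINE_SPINLOCK(my_lock);
static LIST_HEAD(my_queue);
static DECLARE_WAIT_QUEUE_HEAD(my_waitq);
static int my_shutdown;

static int my_service_loop(void *arg)
{
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);
	while (!my_shutdown) {
		int dropped_lock = 0;

		if (!list_empty(&my_queue)) {
			struct my_work *w = list_entry(my_queue.next,
						       struct my_work, link);

			list_del(&w->link);
			spin_unlock_irqrestore(&my_lock, flags);
			dropped_lock = 1;

			kfree(w);		/* stand-in for the real work */

			spin_lock_irqsave(&my_lock, flags);
		}

		if (dropped_lock)		/* rescan before sleeping */
			continue;

		spin_unlock_irqrestore(&my_lock, flags);
		wait_event_interruptible_timeout(my_waitq,
						 my_shutdown ||
						 !list_empty(&my_queue), HZ);
		spin_lock_irqsave(&my_lock, flags);
	}
	spin_unlock_irqrestore(&my_lock, flags);
	return 0;
}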
void
LASSERT(cq == conn->ibc_cq);
- cfs_spin_lock_irqsave(&sched->ibs_lock, flags);
+ spin_lock_irqsave(&sched->ibs_lock, flags);
conn->ibc_ready = 1;
cfs_waitq_signal(&sched->ibs_waitq);
}
- cfs_spin_unlock_irqrestore(&sched->ibs_lock, flags);
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
}
void
"performance\n", name, sched->ibs_cpt);
}
- cfs_spin_lock_irqsave(&sched->ibs_lock, flags);
+ spin_lock_irqsave(&sched->ibs_lock, flags);
while (!kiblnd_data.kib_shutdown) {
if (busy_loops++ >= IBLND_RESCHED) {
- cfs_spin_unlock_irqrestore(&sched->ibs_lock, flags);
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
cfs_cond_resched();
busy_loops = 0;
- cfs_spin_lock_irqsave(&sched->ibs_lock, flags);
+ spin_lock_irqsave(&sched->ibs_lock, flags);
}
did_something = 0;
cfs_list_del(&conn->ibc_sched_list);
conn->ibc_ready = 0;
- cfs_spin_unlock_irqrestore(&sched->ibs_lock, flags);
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
if (rc == 0) {
libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
kiblnd_close_conn(conn, -EIO);
kiblnd_conn_decref(conn);
- cfs_spin_lock_irqsave(&sched->ibs_lock,
+ spin_lock_irqsave(&sched->ibs_lock,
flags);
continue;
}
rc);
kiblnd_close_conn(conn, -EIO);
kiblnd_conn_decref(conn);
- cfs_spin_lock_irqsave(&sched->ibs_lock, flags);
+ spin_lock_irqsave(&sched->ibs_lock, flags);
continue;
}
- cfs_spin_lock_irqsave(&sched->ibs_lock, flags);
+ spin_lock_irqsave(&sched->ibs_lock, flags);
if (rc != 0 || conn->ibc_ready) {
/* There may be another completion waiting; get
}
if (rc != 0) {
- cfs_spin_unlock_irqrestore(&sched->ibs_lock,
- flags);
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
kiblnd_complete(&wc);
- cfs_spin_lock_irqsave(&sched->ibs_lock, flags);
+ spin_lock_irqsave(&sched->ibs_lock, flags);
}
kiblnd_conn_decref(conn); /* ...drop my ref from above */
cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
cfs_waitq_add_exclusive(&sched->ibs_waitq, &wait);
- cfs_spin_unlock_irqrestore(&sched->ibs_lock, flags);
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
busy_loops = 0;
cfs_waitq_del(&sched->ibs_waitq, &wait);
cfs_set_current_state(CFS_TASK_RUNNING);
- cfs_spin_lock_irqsave(&sched->ibs_lock, flags);
+ spin_lock_irqsave(&sched->ibs_lock, flags);
}
- cfs_spin_unlock_irqrestore(&sched->ibs_lock, flags);
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
kiblnd_thread_fini();
return 0;
int
kiblnd_failover_thread(void *arg)
{
- cfs_rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
kib_dev_t *dev;
cfs_waitlink_t wait;
unsigned long flags;
cfs_block_allsigs ();
cfs_waitlink_init(&wait);
- cfs_write_lock_irqsave(glock, flags);
+ write_lock_irqsave(glock, flags);
while (!kiblnd_data.kib_shutdown) {
int do_failover = 0;
if (do_failover) {
cfs_list_del_init(&dev->ibd_fail_list);
dev->ibd_failover = 1;
- cfs_write_unlock_irqrestore(glock, flags);
+ write_unlock_irqrestore(glock, flags);
- rc = kiblnd_dev_failover(dev);
+ rc = kiblnd_dev_failover(dev);
- cfs_write_lock_irqsave(glock, flags);
+ write_lock_irqsave(glock, flags);
LASSERT (dev->ibd_failover);
dev->ibd_failover = 0;
cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
cfs_waitq_add(&kiblnd_data.kib_failover_waitq, &wait);
- cfs_write_unlock_irqrestore(glock, flags);
+ write_unlock_irqrestore(glock, flags);
- rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
- cfs_time_seconds(1));
- cfs_set_current_state(CFS_TASK_RUNNING);
- cfs_waitq_del(&kiblnd_data.kib_failover_waitq, &wait);
- cfs_write_lock_irqsave(glock, flags);
+ rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
+ cfs_time_seconds(1));
+ cfs_set_current_state(CFS_TASK_RUNNING);
+ cfs_waitq_del(&kiblnd_data.kib_failover_waitq, &wait);
+ write_lock_irqsave(glock, flags);
if (!long_sleep || rc != 0)
continue;
}
}
- cfs_write_unlock_irqrestore(glock, flags);
+ write_unlock_irqrestore(glock, flags);
kiblnd_thread_fini();
return 0;
unsigned long flags;
char *str;
-
- cfs_spin_lock_irqsave(&kptllnd_data.kptl_ptlid2str_lock, flags);
+
+ spin_lock_irqsave(&kptllnd_data.kptl_ptlid2str_lock, flags);
str = strs[idx++];
if (idx >= sizeof(strs)/sizeof(strs[0]))
idx = 0;
- cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_ptlid2str_lock, flags);
+ spin_unlock_irqrestore(&kptllnd_data.kptl_ptlid2str_lock, flags);
snprintf(str, sizeof(strs[0]), FMT_PTLID, id.pid, id.nid);
return str;
if (kptllnd_find_target(net, id, &peer) != 0)
return;
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
if (peer->peer_last_alive != 0)
*when = peer->peer_last_alive;
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
kptllnd_peer_decref(peer);
return;
}
unsigned long flags;
lnet_process_id_t process_id;
- cfs_read_lock(&kptllnd_data.kptl_net_rw_lock);
+ read_lock(&kptllnd_data.kptl_net_rw_lock);
LASSERT (cfs_list_empty(&kptllnd_data.kptl_nets));
- cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
+ read_unlock(&kptllnd_data.kptl_net_rw_lock);
switch (kptllnd_data.kptl_init) {
default:
LASSERT (cfs_list_empty(&kptllnd_data.kptl_sched_rxbq));
/* lock to interleave cleanly with peer birth/death */
- cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
LASSERT (kptllnd_data.kptl_shutdown == 0);
kptllnd_data.kptl_shutdown = 1; /* phase 1 == destroy peers */
/* no new peers possible now */
- cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+ write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
flags);
/* nuke all existing peers */
process_id.pid = LNET_PID_ANY;
kptllnd_peer_del(process_id);
- cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
LASSERT (kptllnd_data.kptl_n_active_peers == 0);
"Waiting for %d peers to terminate\n",
kptllnd_data.kptl_npeers);
- cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+ read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
flags);
cfs_pause(cfs_time_seconds(1));
- cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock,
+ read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock,
flags);
}
for (i = 0; i < kptllnd_data.kptl_peer_hash_size; i++)
LASSERT (cfs_list_empty (&kptllnd_data.kptl_peers[i]));
- cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+ read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
flags);
CDEBUG(D_NET, "All peers deleted\n");
kptllnd_data.kptl_eqh = PTL_INVALID_HANDLE;
kptllnd_data.kptl_nih = PTL_INVALID_HANDLE;
- cfs_rwlock_init(&kptllnd_data.kptl_net_rw_lock);
+ rwlock_init(&kptllnd_data.kptl_net_rw_lock);
CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_nets);
/* Setup the sched locks/lists/waitq */
- cfs_spin_lock_init(&kptllnd_data.kptl_sched_lock);
+ spin_lock_init(&kptllnd_data.kptl_sched_lock);
cfs_waitq_init(&kptllnd_data.kptl_sched_waitq);
CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_txq);
CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxq);
CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxbq);
/* Init kptl_ptlid2str_lock before any call to kptllnd_ptlid2str */
- cfs_spin_lock_init(&kptllnd_data.kptl_ptlid2str_lock);
+ spin_lock_init(&kptllnd_data.kptl_ptlid2str_lock);
/* Setup the tx locks/lists */
- cfs_spin_lock_init(&kptllnd_data.kptl_tx_lock);
+ spin_lock_init(&kptllnd_data.kptl_tx_lock);
CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_idle_txs);
cfs_atomic_set(&kptllnd_data.kptl_ntx, 0);
kptllnd_data.kptl_nak_msg->ptlm_srcpid = the_lnet.ln_pid;
kptllnd_data.kptl_nak_msg->ptlm_srcstamp = kptllnd_data.kptl_incarnation;
- cfs_rwlock_init(&kptllnd_data.kptl_peer_rw_lock);
+ rwlock_init(&kptllnd_data.kptl_peer_rw_lock);
cfs_waitq_init(&kptllnd_data.kptl_watchdog_waitq);
CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_closing_peers);
CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_zombie_peers);
kptllnd_data.kptl_nak_msg->ptlm_srcnid = ni->ni_nid;
cfs_atomic_set(&net->net_refcount, 1);
- cfs_write_lock(&kptllnd_data.kptl_net_rw_lock);
+ write_lock(&kptllnd_data.kptl_net_rw_lock);
cfs_list_add_tail(&net->net_list, &kptllnd_data.kptl_nets);
- cfs_write_unlock(&kptllnd_data.kptl_net_rw_lock);
+ write_unlock(&kptllnd_data.kptl_net_rw_lock);
return 0;
failed:
ni->ni_data = NULL;
net->net_ni = NULL;
- cfs_write_lock(&kptllnd_data.kptl_net_rw_lock);
+ write_lock(&kptllnd_data.kptl_net_rw_lock);
kptllnd_net_decref(net);
cfs_list_del_init(&net->net_list);
- cfs_write_unlock(&kptllnd_data.kptl_net_rw_lock);
+ write_unlock(&kptllnd_data.kptl_net_rw_lock);
/* Can't nuke peers here - they are shared among all NIs */
- cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
net->net_shutdown = 1; /* Order with peer creation */
- cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
i = 2;
while (cfs_atomic_read(&net->net_refcount) != 0) {
typedef struct kptl_rx_buffer_pool
{
- cfs_spinlock_t rxbp_lock;
+ spinlock_t rxbp_lock;
cfs_list_t rxbp_list; /* all allocated buffers */
int rxbp_count; /* # allocated buffers */
int rxbp_reserved; /* # requests to buffer */
cfs_list_t peer_list;
cfs_atomic_t peer_refcount; /* The current references */
enum kptllnd_peer_state peer_state;
- cfs_spinlock_t peer_lock; /* serialize */
+ spinlock_t peer_lock; /* serialize */
cfs_list_t peer_noops; /* PTLLND_MSG_TYPE_NOOP txs */
cfs_list_t peer_sendq; /* txs waiting for mh handles */
cfs_list_t peer_activeq; /* txs awaiting completion */
__u64 kptl_incarnation; /* which one am I */
ptl_handle_eq_t kptl_eqh; /* Event Queue (EQ) */
- cfs_rwlock_t kptl_net_rw_lock; /* serialise... */
- cfs_list_t kptl_nets; /* kptl_net instances */
+ rwlock_t kptl_net_rw_lock; /* serialise... */
+ cfs_list_t kptl_nets; /* kptl_net instances */
- cfs_spinlock_t kptl_sched_lock; /* serialise... */
+ spinlock_t kptl_sched_lock; /* serialise... */
cfs_waitq_t kptl_sched_waitq; /* schedulers sleep here */
cfs_list_t kptl_sched_txq; /* tx requiring attention */
cfs_list_t kptl_sched_rxq; /* rx requiring attention */
cfs_mem_cache_t* kptl_rx_cache; /* rx descriptor cache */
cfs_atomic_t kptl_ntx; /* # tx descs allocated */
- cfs_spinlock_t kptl_tx_lock; /* serialise idle tx list*/
- cfs_list_t kptl_idle_txs; /* idle tx descriptors */
+ spinlock_t kptl_tx_lock; /* serialise idle tx list*/
+ cfs_list_t kptl_idle_txs; /* idle tx descriptors */
- cfs_rwlock_t kptl_peer_rw_lock; /* lock for peer table */
+ rwlock_t kptl_peer_rw_lock; /* lock for peer table */
cfs_list_t *kptl_peers; /* hash table of all my known peers */
cfs_list_t kptl_closing_peers; /* peers being closed */
cfs_list_t kptl_zombie_peers; /* peers waiting for refs to drain */
int kptl_expected_peers; /* # peers I can buffer HELLOs from */
kptl_msg_t *kptl_nak_msg; /* common NAK message */
- cfs_spinlock_t kptl_ptlid2str_lock; /* serialise str ops */
+ spinlock_t kptl_ptlid2str_lock; /* serialise str ops */
};
struct kptl_net
static inline void
kptllnd_rx_buffer_addref(kptl_rx_buffer_t *rxb)
{
- unsigned long flags;
+ unsigned long flags;
- cfs_spin_lock_irqsave(&rxb->rxb_pool->rxbp_lock, flags);
- rxb->rxb_refcount++;
- cfs_spin_unlock_irqrestore(&rxb->rxb_pool->rxbp_lock, flags);
+ spin_lock_irqsave(&rxb->rxb_pool->rxbp_lock, flags);
+ rxb->rxb_refcount++;
+ spin_unlock_irqrestore(&rxb->rxb_pool->rxbp_lock, flags);
}
static inline void
kptllnd_rx_buffer_decref_locked(kptl_rx_buffer_t *rxb)
{
- if (--(rxb->rxb_refcount) == 0) {
- cfs_spin_lock(&kptllnd_data.kptl_sched_lock);
+ if (--(rxb->rxb_refcount) == 0) {
+ spin_lock(&kptllnd_data.kptl_sched_lock);
- cfs_list_add_tail(&rxb->rxb_repost_list,
- &kptllnd_data.kptl_sched_rxbq);
- cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
+ cfs_list_add_tail(&rxb->rxb_repost_list,
+ &kptllnd_data.kptl_sched_rxbq);
+ cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
- cfs_spin_unlock(&kptllnd_data.kptl_sched_lock);
- }
+ spin_unlock(&kptllnd_data.kptl_sched_lock);
+ }
}
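/*
 * Illustrative sketch only -- not part of the patch.  It restates the
 * hand-off in kptllnd_rx_buffer_decref_locked() above: when a refcount
 * held under one lock drops to zero, the object is queued for a scheduler
 * thread under a second spinlock and the scheduler is woken, rather than
 * being reposted in the current context.  All names (my_buf,
 * my_sched_lock, my_sched_queue, my_sched_waitq) are hypothetical.
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct my_buf {
	struct list_head link;
	int		 refcount;	/* guarded by the pool lock */
};

static DEFINE_SPINLOCK(my_sched_lock);
static LIST_HEAD(my_sched_queue);
static DECLARE_WAIT_QUEUE_HEAD(my_sched_waitq);

/* Caller holds the buffer's pool lock, as in the _locked variant above. */
static void my_buf_decref_locked(struct my_buf *buf)
{
	if (--buf->refcount != 0)
		return;

	spin_lock(&my_sched_lock);
	list_add_tail(&buf->link, &my_sched_queue);
	spin_unlock(&my_sched_lock);

	wake_up(&my_sched_waitq);	/* scheduler thread reposts the buffer */
}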
static inline void
unsigned long flags;
int count;
- cfs_spin_lock_irqsave(&rxb->rxb_pool->rxbp_lock, flags);
- count = --(rxb->rxb_refcount);
- cfs_spin_unlock_irqrestore(&rxb->rxb_pool->rxbp_lock, flags);
+ spin_lock_irqsave(&rxb->rxb_pool->rxbp_lock, flags);
+ count = --(rxb->rxb_refcount);
+ spin_unlock_irqrestore(&rxb->rxb_pool->rxbp_lock, flags);
if (count == 0)
kptllnd_rx_buffer_post(rxb);
kptl_peer_t *peer;
unsigned long flags;
- cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
- peer = kptllnd_id2peer_locked(id);
- cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ peer = kptllnd_id2peer_locked(id);
+ read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
return peer;
}
return -EIO;
}
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
tx->tx_lnet_msg = lntmsg;
/* lnet_finalize() will be called when tx is torn down, so I must
/* peer has now got my ref on 'tx' */
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
tx->tx_tposted = jiffies;
cfs_waitlink_init(&waitlink);
- cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+ spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
/* threads shut down in phase 2 after all peers have been destroyed */
while (kptllnd_data.kptl_shutdown < 2) {
kptl_rx_t, rx_list);
cfs_list_del(&rx->rx_list);
- cfs_spin_unlock_irqrestore(&kptllnd_data. \
+ spin_unlock_irqrestore(&kptllnd_data. \
kptl_sched_lock,
flags);
kptllnd_rx_parse(rx);
did_something = 1;
- cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
+ spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
flags);
}
rxb_repost_list);
cfs_list_del(&rxb->rxb_repost_list);
- cfs_spin_unlock_irqrestore(&kptllnd_data. \
+ spin_unlock_irqrestore(&kptllnd_data. \
kptl_sched_lock,
flags);
kptllnd_rx_buffer_post(rxb);
did_something = 1;
- cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
+ spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
flags);
}
kptl_tx_t, tx_list);
cfs_list_del_init(&tx->tx_list);
- cfs_spin_unlock_irqrestore(&kptllnd_data. \
+ spin_unlock_irqrestore(&kptllnd_data. \
kptl_sched_lock, flags);
kptllnd_tx_fini(tx);
did_something = 1;
- cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
+ spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
flags);
}
cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
cfs_waitq_add_exclusive(&kptllnd_data.kptl_sched_waitq,
&waitlink);
- cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
+ spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
flags);
if (!did_something)
cfs_set_current_state(CFS_TASK_RUNNING);
cfs_waitq_del(&kptllnd_data.kptl_sched_waitq, &waitlink);
- cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+ spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
counter = 0;
}
- cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
+ spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
kptllnd_thread_fini();
return 0;
int *nsendq, int *nactiveq,
int *credits, int *outstanding_credits)
{
- cfs_rwlock_t *g_lock = &kptllnd_data.kptl_peer_rw_lock;
+ rwlock_t *g_lock = &kptllnd_data.kptl_peer_rw_lock;
unsigned long flags;
cfs_list_t *ptmp;
kptl_peer_t *peer;
int i;
int rc = -ENOENT;
- cfs_read_lock_irqsave(g_lock, flags);
+ read_lock_irqsave(g_lock, flags);
for (i = 0; i < kptllnd_data.kptl_peer_hash_size; i++) {
cfs_list_for_each (ptmp, &kptllnd_data.kptl_peers[i]) {
*refcount = cfs_atomic_read(&peer->peer_refcount);
*incarnation = peer->peer_incarnation;
- cfs_spin_lock(&peer->peer_lock);
+ spin_lock(&peer->peer_lock);
*next_matchbits = peer->peer_next_matchbits;
*last_matchbits_seen = peer->peer_last_matchbits_seen;
*nsendq = kptllnd_count_queue(&peer->peer_sendq);
*nactiveq = kptllnd_count_queue(&peer->peer_activeq);
- cfs_spin_unlock(&peer->peer_lock);
+ spin_unlock(&peer->peer_lock);
rc = 0;
goto out;
}
out:
- cfs_read_unlock_irqrestore(g_lock, flags);
+ read_unlock_irqrestore(g_lock, flags);
return rc;
}
CFS_INIT_LIST_HEAD (&peer->peer_noops);
CFS_INIT_LIST_HEAD (&peer->peer_sendq);
CFS_INIT_LIST_HEAD (&peer->peer_activeq);
- cfs_spin_lock_init (&peer->peer_lock);
+ spin_lock_init(&peer->peer_lock);
peer->peer_state = PEER_STATE_ALLOCATED;
peer->peer_error = 0;
cfs_atomic_set(&peer->peer_refcount, 1); /* 1 ref for caller */
- cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
peer->peer_myincarnation = kptllnd_data.kptl_incarnation;
/* Only increase # peers under lock, to guarantee we don't grow it
* during shutdown */
if (net->net_shutdown) {
- cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+ write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
flags);
LIBCFS_FREE(peer, sizeof(*peer));
return NULL;
}
kptllnd_data.kptl_npeers++;
- cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
return peer;
}
LASSERT (cfs_list_empty(&peer->peer_sendq));
LASSERT (cfs_list_empty(&peer->peer_activeq));
- cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
if (peer->peer_state == PEER_STATE_ZOMBIE)
cfs_list_del(&peer->peer_list);
kptllnd_data.kptl_npeers--;
- cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
LIBCFS_FREE (peer, sizeof (*peer));
}
{
unsigned long flags;
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
kptllnd_cancel_txlist(&peer->peer_noops, txs);
kptllnd_cancel_txlist(&peer->peer_sendq, txs);
kptllnd_cancel_txlist(&peer->peer_activeq, txs);
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
}
void
int error = 0;
cfs_time_t last_alive = 0;
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
if (peer->peer_error != 0) {
error = peer->peer_error;
last_alive = peer->peer_last_alive;
}
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
if (error == 0)
return;
- cfs_read_lock(&kptllnd_data.kptl_net_rw_lock);
+ read_lock(&kptllnd_data.kptl_net_rw_lock);
cfs_list_for_each_entry (net, &kptllnd_data.kptl_nets, net_list)
nnets++;
- cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
+ read_unlock(&kptllnd_data.kptl_net_rw_lock);
if (nnets == 0) /* shutdown in progress */
return;
}
memset(nets, 0, nnets * sizeof(*nets));
- cfs_read_lock(&kptllnd_data.kptl_net_rw_lock);
+ read_lock(&kptllnd_data.kptl_net_rw_lock);
i = 0;
cfs_list_for_each_entry (net, &kptllnd_data.kptl_nets, net_list) {
LASSERT (i < nnets);
kptllnd_net_addref(net);
i++;
}
- cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
+ read_unlock(&kptllnd_data.kptl_net_rw_lock);
for (i = 0; i < nnets; i++) {
lnet_nid_t peer_nid;
/* Check with a read lock first to avoid blocking anyone */
- cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
idle = cfs_list_empty(&kptllnd_data.kptl_closing_peers) &&
cfs_list_empty(&kptllnd_data.kptl_zombie_peers);
- cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
if (idle)
return;
CFS_INIT_LIST_HEAD(&txs);
- cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
/* Cancel txs on all zombie peers. NB anyone dropping the last peer
* ref removes it from this list, so I mustn't drop the lock while
&kptllnd_data.kptl_zombie_peers);
peer->peer_state = PEER_STATE_ZOMBIE;
- cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+ write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
flags);
kptllnd_peer_notify(peer);
kptllnd_peer_cancel_txs(peer, &txs);
kptllnd_peer_decref(peer);
- cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
}
- cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
/* Drop peer's ref on all cancelled txs. This will get
* kptllnd_tx_fini() to abort outstanding comms if necessary. */
{
unsigned long flags;
- cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
kptllnd_peer_close_locked(peer, why);
- cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
}
int
}
again:
- cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
for (i = lo; i <= hi; i++) {
cfs_list_for_each_safe (ptmp, pnxt,
kptllnd_peer_addref(peer); /* 1 ref for me... */
- cfs_read_unlock_irqrestore(&kptllnd_data. \
+ read_unlock_irqrestore(&kptllnd_data. \
kptl_peer_rw_lock,
flags);
}
}
- cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
return (rc);
}
/* CAVEAT EMPTOR: I take over caller's ref on 'tx' */
unsigned long flags;
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
/* Ensure HELLO is sent first */
if (tx->tx_msg->ptlm_type == PTLLND_MSG_TYPE_NOOP)
else
cfs_list_add_tail(&tx->tx_list, &peer->peer_sendq);
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
}
LASSERT(!cfs_in_interrupt());
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
peer->peer_retry_noop = 0;
if (kptllnd_peer_send_noop(peer)) {
/* post a NOOP to return credits */
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
tx = kptllnd_get_idle_tx(TX_TYPE_SMALL_MESSAGE);
if (tx == NULL) {
kptllnd_post_tx(peer, tx, 0);
}
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
peer->peer_retry_noop = (tx == NULL);
}
!kptllnd_peer_send_noop(peer)) {
tx->tx_active = 0;
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
CDEBUG(D_NET, "%s: redundant noop\n",
libcfs_id2str(peer->peer_id));
kptllnd_tx_decref(tx);
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
continue;
}
kptllnd_tx_addref(tx); /* 1 ref for me... */
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
if (tx->tx_type == TX_TYPE_PUT_REQUEST ||
tx->tx_type == TX_TYPE_GET_REQUEST) {
kptllnd_tx_decref(tx); /* drop my ref */
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
}
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
return;
failed:
again:
/* NB. Shared lock while I just look */
- cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+ read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
cfs_list_for_each_entry (peer, peers, peer_list) {
kptl_tx_t *tx;
libcfs_id2str(peer->peer_id), peer->peer_credits,
peer->peer_outstanding_credits, peer->peer_sent_credits);
- cfs_spin_lock(&peer->peer_lock);
+ spin_lock(&peer->peer_lock);
if (peer->peer_check_stamp == stamp) {
/* checked already this pass */
- cfs_spin_unlock(&peer->peer_lock);
+ spin_unlock(&peer->peer_lock);
continue;
}
nactive = kptllnd_count_queue(&peer->peer_activeq);
}
- cfs_spin_unlock(&peer->peer_lock);
+ spin_unlock(&peer->peer_lock);
if (tx == NULL && !check_sends)
continue;
kptllnd_peer_addref(peer); /* 1 ref for me... */
- cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+ read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
flags);
if (tx == NULL) { /* nothing timed out */
goto again;
}
- cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+ read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
}
kptl_peer_t *
kptllnd_peer_handle_hello (kptl_net_t *net,
ptl_process_id_t initiator, kptl_msg_t *msg)
{
- cfs_rwlock_t *g_lock = &kptllnd_data.kptl_peer_rw_lock;
+ rwlock_t *g_lock = &kptllnd_data.kptl_peer_rw_lock;
kptl_peer_t *peer;
kptl_peer_t *new_peer;
lnet_process_id_t lpid;
return NULL;
}
- cfs_write_lock_irqsave(g_lock, flags);
+ write_lock_irqsave(g_lock, flags);
peer = kptllnd_id2peer_locked(lpid);
if (peer != NULL) {
if (msg->ptlm_dststamp != 0 &&
msg->ptlm_dststamp != peer->peer_myincarnation) {
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
CERROR("Ignoring HELLO from %s: unexpected "
"dststamp "LPX64" ("LPX64" wanted)\n",
peer->peer_max_msg_size =
msg->ptlm_u.hello.kptlhm_max_msg_size;
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
return peer;
}
if (msg->ptlm_dststamp != 0 &&
msg->ptlm_dststamp <= peer->peer_myincarnation) {
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
CERROR("Ignoring stale HELLO from %s: "
"dststamp "LPX64" (current "LPX64")\n",
kptllnd_cull_peertable_locked(lpid);
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
if (peer != NULL) {
CDEBUG(D_NET, "Peer %s (%s) reconnecting:"
return NULL;
}
- cfs_write_lock_irqsave(g_lock, flags);
+ write_lock_irqsave(g_lock, flags);
again:
if (net->net_shutdown) {
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
CERROR ("Shutdown started, refusing connection from %s\n",
libcfs_id2str(lpid));
peer->peer_max_msg_size =
msg->ptlm_u.hello.kptlhm_max_msg_size;
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
CWARN("Outgoing instantiated peer %s\n",
libcfs_id2str(lpid));
} else {
LASSERT (peer->peer_state == PEER_STATE_ACTIVE);
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
/* WOW! Somehow this peer completed the HELLO
* handshake while I slept. I guess I could have slept
if (kptllnd_data.kptl_n_active_peers ==
kptllnd_data.kptl_expected_peers) {
/* peer table full */
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
kptllnd_peertable_overflow_msg("Connection from ", lpid);
return NULL;
}
- cfs_write_lock_irqsave(g_lock, flags);
+ write_lock_irqsave(g_lock, flags);
kptllnd_data.kptl_expected_peers++;
goto again;
}
LASSERT (!net->net_shutdown);
kptllnd_peer_add_peertable_locked(new_peer);
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
/* NB someone else could get in now and post a message before I post
* the HELLO, but post_tx/check_sends take care of that! */
kptllnd_find_target(kptl_net_t *net, lnet_process_id_t target,
kptl_peer_t **peerp)
{
- cfs_rwlock_t *g_lock = &kptllnd_data.kptl_peer_rw_lock;
+ rwlock_t *g_lock = &kptllnd_data.kptl_peer_rw_lock;
ptl_process_id_t ptl_id;
kptl_peer_t *new_peer;
kptl_tx_t *hello_tx;
__u64 last_matchbits_seen;
/* I expect to find the peer, so I only take a read lock... */
- cfs_read_lock_irqsave(g_lock, flags);
+ read_lock_irqsave(g_lock, flags);
*peerp = kptllnd_id2peer_locked(target);
- cfs_read_unlock_irqrestore(g_lock, flags);
+ read_unlock_irqrestore(g_lock, flags);
if (*peerp != NULL)
return 0;
if (rc != 0)
goto unwind_1;
- cfs_write_lock_irqsave(g_lock, flags);
+ write_lock_irqsave(g_lock, flags);
again:
/* Called only in lnd_send which can't happen after lnd_shutdown */
LASSERT (!net->net_shutdown);
*peerp = kptllnd_id2peer_locked(target);
if (*peerp != NULL) {
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
goto unwind_2;
}
if (kptllnd_data.kptl_n_active_peers ==
kptllnd_data.kptl_expected_peers) {
/* peer table full */
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
kptllnd_peertable_overflow_msg("Connection to ", target);
rc = -ENOMEM;
goto unwind_2;
}
- cfs_write_lock_irqsave(g_lock, flags);
+ write_lock_irqsave(g_lock, flags);
kptllnd_data.kptl_expected_peers++;
goto again;
}
kptllnd_peer_add_peertable_locked(new_peer);
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
/* NB someone else could get in now and post a message before I post
* the HELLO, but post_tx/check_sends take care of that! */
kptllnd_rx_buffer_pool_init(kptl_rx_buffer_pool_t *rxbp)
{
memset(rxbp, 0, sizeof(*rxbp));
- cfs_spin_lock_init(&rxbp->rxbp_lock);
+ spin_lock_init(&rxbp->rxbp_lock);
CFS_INIT_LIST_HEAD(&rxbp->rxbp_list);
}
CDEBUG(D_NET, "kptllnd_rx_buffer_pool_reserve(%d)\n", count);
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
for (;;) {
if (rxbp->rxbp_shutdown) {
break;
}
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
LIBCFS_ALLOC(rxb, sizeof(*rxb));
LIBCFS_ALLOC(buffer, bufsize);
if (buffer != NULL)
LIBCFS_FREE(buffer, bufsize);
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
rc = -ENOMEM;
break;
}
rxb->rxb_buffer = buffer;
rxb->rxb_mdh = PTL_INVALID_HANDLE;
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
if (rxbp->rxbp_shutdown) {
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
LIBCFS_FREE(rxb, sizeof(*rxb));
LIBCFS_FREE(buffer, bufsize);
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
rc = -ESHUTDOWN;
break;
}
cfs_list_add_tail(&rxb->rxb_list, &rxbp->rxbp_list);
rxbp->rxbp_count++;
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
kptllnd_rx_buffer_post(rxb);
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
}
if (rc == 0)
rxbp->rxbp_reserved += count;
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
return rc;
}
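/*
 * Illustrative sketch only -- not part of the patch.  It shows the
 * rx-buffer-pool reserve idiom above in the native API: memory is
 * allocated with the pool spinlock dropped, and the shutdown flag is
 * re-checked after the lock is retaken, freeing the new buffer if
 * shutdown began in the meantime.  All names (my_pool, my_pool_buf,
 * my_pool_grow) are hypothetical.
 */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_pool {
	spinlock_t	 lock;
	struct list_head bufs;
	int		 shutdown;
};

struct my_pool_buf { struct list_head link; };

static int my_pool_grow(struct my_pool *pool)
{
	struct my_pool_buf *buf;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (pool->shutdown) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return -ESHUTDOWN;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);	/* may sleep: unlocked */
	if (buf == NULL)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (pool->shutdown) {			/* re-check after re-locking */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(buf);
		return -ESHUTDOWN;
	}
	list_add_tail(&buf->link, &pool->bufs);
	spin_unlock_irqrestore(&pool->lock, flags);
	return 0;
}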
{
unsigned long flags;
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
CDEBUG(D_NET, "kptllnd_rx_buffer_pool_unreserve(%d)\n", count);
rxbp->rxbp_reserved -= count;
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
}
void
* different MD) from when the MD is actually unlinked, to when the
* event callback tells me it has been unlinked. */
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
rxbp->rxbp_shutdown = 1;
rxb = cfs_list_entry (tmp, kptl_rx_buffer_t, rxb_list);
if (rxb->rxb_idle) {
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock,
+ spin_unlock_irqrestore(&rxbp->rxbp_lock,
flags);
kptllnd_rx_buffer_destroy(rxb);
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock,
+ spin_lock_irqsave(&rxbp->rxbp_lock,
flags);
continue;
}
if (PtlHandleIsEqual(mdh, PTL_INVALID_HANDLE))
continue;
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
rc = PtlMDUnlink(mdh);
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
#ifdef LUSTRE_PORTALS_UNLINK_SEMANTICS
/* callback clears rxb_mdh and drops net's ref
if (cfs_list_empty(&rxbp->rxbp_list))
break;
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
/* Wait a bit for references to be dropped */
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
cfs_pause(cfs_time_seconds(1));
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
}
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
}
void
any.nid = PTL_NID_ANY;
any.pid = PTL_PID_ANY;
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
if (rxbp->rxbp_shutdown) {
rxb->rxb_idle = 1;
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
return;
}
rxb->rxb_refcount = 1; /* net's ref */
rxb->rxb_posted = 1; /* I'm posting */
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
rc = PtlMEAttach(kptllnd_data.kptl_nih,
*kptllnd_tunables.kptl_portal,
rc = PtlMDAttach(meh, md, PTL_UNLINK, &mdh);
if (rc == PTL_OK) {
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
if (rxb->rxb_posted) /* Not auto-unlinked yet!!! */
rxb->rxb_mdh = mdh;
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
return;
}
LASSERT(rc == PTL_OK);
failed:
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
rxb->rxb_posted = 0;
/* XXX this will just try again immediately */
kptllnd_rx_buffer_decref_locked(rxb);
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
}
kptl_rx_t *
if (peer != NULL) {
/* Update credits (after I've decref-ed the buffer) */
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
if (post_credit == PTLLND_POSTRX_PEER_CREDIT)
peer->peer_outstanding_credits++;
peer->peer_outstanding_credits, peer->peer_sent_credits,
rx);
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
/* I might have to send back credits */
kptllnd_peer_check_sends(peer);
rx->rx_initiator = ev->initiator;
rx->rx_treceived = jiffies;
/* Queue for attention */
- cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
+ spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
flags);
cfs_list_add_tail(&rx->rx_list,
&kptllnd_data.kptl_sched_rxq);
cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
- cfs_spin_unlock_irqrestore(&kptllnd_data. \
+ spin_unlock_irqrestore(&kptllnd_data. \
kptl_sched_lock, flags);
}
}
if (unlinked) {
- cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+ spin_lock_irqsave(&rxbp->rxbp_lock, flags);
rxb->rxb_posted = 0;
rxb->rxb_mdh = PTL_INVALID_HANDLE;
kptllnd_rx_buffer_decref_locked(rxb);
- cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+ spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
}
}
{
kptl_net_t *net;
- cfs_read_lock(&kptllnd_data.kptl_net_rw_lock);
+ read_lock(&kptllnd_data.kptl_net_rw_lock);
cfs_list_for_each_entry (net, &kptllnd_data.kptl_nets, net_list) {
LASSERT (!net->net_shutdown);
if (net->net_ni->ni_nid == nid) {
kptllnd_net_addref(net);
- cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
+ read_unlock(&kptllnd_data.kptl_net_rw_lock);
return net;
}
}
- cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
+ read_unlock(&kptllnd_data.kptl_net_rw_lock);
return NULL;
}
if (peer->peer_state == PEER_STATE_WAITING_HELLO) {
/* recoverable error - restart txs */
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
kptllnd_cancel_txlist(&peer->peer_sendq, &txs);
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
CWARN("NAK %s: Unexpected %s message\n",
libcfs_id2str(srcid),
LASSERTF (msg->ptlm_srcpid == peer->peer_id.pid, "m %u p %u\n",
msg->ptlm_srcpid, peer->peer_id.pid);
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
/* Check peer only sends when I've sent her credits */
if (peer->peer_sent_credits == 0) {
int oc = peer->peer_outstanding_credits;
int sc = peer->peer_sent_credits;
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
CERROR("%s: buffer overrun [%d/%d+%d]\n",
libcfs_id2str(peer->peer_id), c, sc, oc);
post_credit = PTLLND_POSTRX_NO_CREDIT;
}
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
/* See if something can go out now that credits have come in */
if (msg->ptlm_credits != 0)
PTL_RESERVED_MATCHBITS);
/* Update last match bits seen */
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
if (msg->ptlm_u.rdma.kptlrm_matchbits >
rx->rx_peer->peer_last_matchbits_seen)
rx->rx_peer->peer_last_matchbits_seen =
msg->ptlm_u.rdma.kptlrm_matchbits;
- cfs_spin_unlock_irqrestore(&rx->rx_peer->peer_lock, flags);
+ spin_unlock_irqrestore(&rx->rx_peer->peer_lock, flags);
rc = lnet_parse(net->net_ni,
&msg->ptlm_u.rdma.kptlrm_hdr,
if (tx == NULL)
return -ENOMEM;
- cfs_spin_lock(&kptllnd_data.kptl_tx_lock);
+ spin_lock(&kptllnd_data.kptl_tx_lock);
cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_idle_txs);
- cfs_spin_unlock(&kptllnd_data.kptl_tx_lock);
+ spin_unlock(&kptllnd_data.kptl_tx_lock);
}
return 0;
return NULL;
}
- cfs_spin_lock(&kptllnd_data.kptl_tx_lock);
+ spin_lock(&kptllnd_data.kptl_tx_lock);
if (cfs_list_empty (&kptllnd_data.kptl_idle_txs)) {
- cfs_spin_unlock(&kptllnd_data.kptl_tx_lock);
+ spin_unlock(&kptllnd_data.kptl_tx_lock);
tx = kptllnd_alloc_tx();
if (tx == NULL)
kptl_tx_t, tx_list);
cfs_list_del(&tx->tx_list);
- cfs_spin_unlock(&kptllnd_data.kptl_tx_lock);
+ spin_unlock(&kptllnd_data.kptl_tx_lock);
}
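/*
 * Illustrative sketch only -- not part of the patch.  It condenses the
 * idle-descriptor pool above into the native spinlock API: try to pop a
 * cached descriptor under kptl_tx_lock-style protection, fall back to
 * allocating with the lock dropped, and return descriptors to the idle
 * list on put.  All names (my_tx, my_tx_lock, my_idle_txs) are
 * hypothetical.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_tx { struct list_head link; };

static DEFINE_SPINLOCK(my_tx_lock);
static LIST_HEAD(my_idle_txs);

static struct my_tx *my_get_idle_tx(void)
{
	struct my_tx *tx;

	spin_lock(&my_tx_lock);
	if (list_empty(&my_idle_txs)) {
		spin_unlock(&my_tx_lock);
		return kzalloc(sizeof(*tx), GFP_KERNEL);  /* allocate unlocked */
	}
	tx = list_entry(my_idle_txs.next, struct my_tx, link);
	list_del(&tx->link);
	spin_unlock(&my_tx_lock);
	return tx;
}

static void my_put_idle_tx(struct my_tx *tx)
{
	spin_lock(&my_tx_lock);
	list_add_tail(&tx->link, &my_idle_txs);
	spin_unlock(&my_tx_lock);
}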
LASSERT (cfs_atomic_read(&tx->tx_refcount) == 0);
LASSERT (cfs_atomic_read(&tx->tx_refcount) == 0);
LASSERT (!tx->tx_active);
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
msg_mdh = tx->tx_msg_mdh;
rdma_mdh = tx->tx_rdma_mdh;
if (PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE) &&
PtlHandleIsEqual(rdma_mdh, PTL_INVALID_HANDLE)) {
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
return 0;
}
tx->tx_active = 1;
cfs_list_add_tail(&tx->tx_list, &peer->peer_activeq);
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
/* These unlinks will ensure completion events (normal or unlink) will
* happen ASAP */
LASSERT (cfs_atomic_read(&tx->tx_refcount) == 0);
LASSERT (!tx->tx_active);
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
msg_mdh = tx->tx_msg_mdh;
rdma_mdh = tx->tx_rdma_mdh;
if (PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE) &&
PtlHandleIsEqual(rdma_mdh, PTL_INVALID_HANDLE)) {
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
return 0;
}
(tx->tx_lnet_msg == NULL &&
tx->tx_replymsg == NULL));
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
if (!PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE)) {
prc = PtlMDUnlink(msg_mdh);
rdma_mdh = PTL_INVALID_HANDLE;
}
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
/* update tx_???_mdh if callback hasn't fired */
if (PtlHandleIsEqual(tx->tx_msg_mdh, PTL_INVALID_HANDLE))
if (PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE) &&
PtlHandleIsEqual(rdma_mdh, PTL_INVALID_HANDLE)) {
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
return 0;
}
kptllnd_peer_addref(peer); /* extra ref for me... */
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
/* This will get the watchdog thread to try aborting all the peer's
* comms again. NB, this deems it fair that 1 failing tx which can't
tx->tx_peer = NULL;
tx->tx_idle = 1;
- cfs_spin_lock(&kptllnd_data.kptl_tx_lock);
+ spin_lock(&kptllnd_data.kptl_tx_lock);
cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_idle_txs);
- cfs_spin_unlock(&kptllnd_data.kptl_tx_lock);
+ spin_unlock(&kptllnd_data.kptl_tx_lock);
/* Must finalize AFTER freeing 'tx' */
if (msg != NULL)
if (!unlinked)
return;
- cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+ spin_lock_irqsave(&peer->peer_lock, flags);
if (ismsg)
tx->tx_msg_mdh = PTL_INVALID_HANDLE;
if (!PtlHandleIsEqual(tx->tx_msg_mdh, PTL_INVALID_HANDLE) ||
!PtlHandleIsEqual(tx->tx_rdma_mdh, PTL_INVALID_HANDLE) ||
!tx->tx_active) {
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
return;
}
cfs_list_del(&tx->tx_list);
tx->tx_active = 0;
- cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+ spin_unlock_irqrestore(&peer->peer_lock, flags);
/* drop peer's ref, but if it was the last one... */
if (cfs_atomic_dec_and_test(&tx->tx_refcount)) {
/* ...finalize it in thread context! */
- cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+ spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_sched_txq);
cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
- cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
+ spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
flags);
}
}
int index = data->ioc_count;
int rc = -ENOENT;
- cfs_spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
+ spin_lock_irqsave(&kqswnal_data.kqn_idletxd_lock, flags);
cfs_list_for_each (tmp, &kqswnal_data.kqn_activetxds) {
if (index-- != 0)
break;
}
- cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
+ spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
return (rc);
}
/**********************************************************************/
/* Signal the start of shutdown... */
- cfs_spin_lock_irqsave(&kqswnal_data.kqn_idletxd_lock, flags);
+ spin_lock_irqsave(&kqswnal_data.kqn_idletxd_lock, flags);
kqswnal_data.kqn_shuttingdown = 1;
- cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
+ spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
/**********************************************************************/
/* wait for sends that have allocated a tx desc to launch or give up */
CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_idletxds);
CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_activetxds);
- cfs_spin_lock_init (&kqswnal_data.kqn_idletxd_lock);
+ spin_lock_init(&kqswnal_data.kqn_idletxd_lock);
CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_delayedtxds);
CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_donetxds);
CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_readyrxds);
- cfs_spin_lock_init (&kqswnal_data.kqn_sched_lock);
+ spin_lock_init(&kqswnal_data.kqn_sched_lock);
cfs_waitq_init (&kqswnal_data.kqn_sched_waitq);
/* pointers/lists/locks initialised */
cfs_list_t kqn_idletxds; /* transmit descriptors free to use */
cfs_list_t kqn_activetxds; /* transmit descriptors being used */
- cfs_spinlock_t kqn_idletxd_lock; /* serialise idle txd access */
- cfs_atomic_t kqn_pending_txs;/* # transmits being prepped */
+ spinlock_t kqn_idletxd_lock; /* serialise idle txd access */
+ cfs_atomic_t kqn_pending_txs; /* # transmits being prepped */
- cfs_spinlock_t kqn_sched_lock; /* serialise packet schedulers */
+ spinlock_t kqn_sched_lock; /* serialise packet schedulers */
cfs_waitq_t kqn_sched_waitq;/* scheduler blocks here */
cfs_list_t kqn_readyrxds; /* rxds full of data */
void
kqswnal_put_idle_tx (kqswnal_tx_t *ktx)
{
- unsigned long flags;
+ unsigned long flags;
- kqswnal_unmap_tx (ktx); /* release temporary mappings */
- ktx->ktx_state = KTX_IDLE;
+ kqswnal_unmap_tx(ktx); /* release temporary mappings */
+ ktx->ktx_state = KTX_IDLE;
- cfs_spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
+ spin_lock_irqsave(&kqswnal_data.kqn_idletxd_lock, flags);
- cfs_list_del (&ktx->ktx_list); /* take off active list */
- cfs_list_add (&ktx->ktx_list, &kqswnal_data.kqn_idletxds);
+ cfs_list_del(&ktx->ktx_list); /* take off active list */
+ cfs_list_add(&ktx->ktx_list, &kqswnal_data.kqn_idletxds);
- cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
+ spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
}
kqswnal_tx_t *
ktx->ktx_launcher = current->pid;
cfs_atomic_inc(&kqswnal_data.kqn_pending_txs);
- cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
+ spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
/* Idle descs can't have any mapped (as opposed to pre-mapped) pages */
LASSERT (ktx->ktx_nmappedpages == 0);
}
/* Complete the send in thread context */
- cfs_spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
+ spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
- cfs_list_add_tail(&ktx->ktx_schedlist,
- &kqswnal_data.kqn_donetxds);
- cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+ cfs_list_add_tail(&ktx->ktx_schedlist,
+ &kqswnal_data.kqn_donetxds);
+ cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
- cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
+ spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
}
static void
return (0);
case EP_ENOMEM: /* can't allocate ep txd => queue for later */
- cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
+ spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
- cfs_list_add_tail (&ktx->ktx_schedlist,
- &kqswnal_data.kqn_delayedtxds);
- cfs_waitq_signal (&kqswnal_data.kqn_sched_waitq);
+ cfs_list_add_tail(&ktx->ktx_schedlist,
+ &kqswnal_data.kqn_delayedtxds);
+ cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
- cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock,
+ spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
flags);
return (0);
return;
}
- cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
+ spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
- cfs_list_add_tail (&krx->krx_list, &kqswnal_data.kqn_readyrxds);
- cfs_waitq_signal (&kqswnal_data.kqn_sched_waitq);
+ cfs_list_add_tail(&krx->krx_list, &kqswnal_data.kqn_readyrxds);
+ cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
- cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);
+ spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
}
int
cfs_daemonize ("kqswnal_sched");
cfs_block_allsigs ();
- cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
+ spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
for (;;)
{
krx = cfs_list_entry(kqswnal_data.kqn_readyrxds.next,
kqswnal_rx_t, krx_list);
cfs_list_del (&krx->krx_list);
- cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
+ spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
flags);
LASSERT (krx->krx_state == KRX_PARSE);
kqswnal_parse (krx);
did_something = 1;
- cfs_spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
+ spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
flags);
}
ktx = cfs_list_entry(kqswnal_data.kqn_donetxds.next,
kqswnal_tx_t, ktx_schedlist);
cfs_list_del_init (&ktx->ktx_schedlist);
- cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
+ spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
flags);
kqswnal_tx_done_in_thread_context(ktx);
did_something = 1;
- cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock,
+ spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
flags);
}
ktx = cfs_list_entry(kqswnal_data.kqn_delayedtxds.next,
kqswnal_tx_t, ktx_schedlist);
cfs_list_del_init (&ktx->ktx_schedlist);
- cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
+ spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
flags);
rc = kqswnal_launch (ktx);
cfs_atomic_dec (&kqswnal_data.kqn_pending_txs);
did_something = 1;
- cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock,
+ spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
flags);
}
/* nothing to do or hogging CPU */
if (!did_something || counter++ == KQSW_RESCHED) {
- cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
+ spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
flags);
counter = 0;
} else if (need_resched())
cfs_schedule ();
- cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock,
+ spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
flags);
}
}
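/* A minimal sketch (not the kqswnal code itself) of the scheduler-loop
 * locking style above: the queue lock is dropped around each unit of
 * work and re-taken afterwards, and the thread yields when it has been
 * hogging the CPU.  workq_lock, workq, RESCHED_BATCH and struct
 * sched_work are placeholders assumed for illustration. */
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/sched.h>

#define RESCHED_BATCH	100	/* placeholder work-per-yield threshold */

struct sched_work {
	struct list_head sw_list;
};

static DEFINE_SPINLOCK(workq_lock);
static LIST_HEAD(workq);

static void process_item(struct sched_work *sw)
{
	/* placeholder for the real per-item work (parse an rx, launch a tx) */
	(void)sw;
}

static void scheduler_pass(void)
{
	struct sched_work *sw;
	unsigned long flags;
	int counter = 0;

	spin_lock_irqsave(&workq_lock, flags);
	while (!list_empty(&workq)) {
		sw = list_entry(workq.next, struct sched_work, sw_list);
		list_del(&sw->sw_list);

		/* never do real work with the queue lock held */
		spin_unlock_irqrestore(&workq_lock, flags);
		process_item(sw);

		/* yield if this thread has been hogging the CPU */
		if (++counter >= RESCHED_BATCH) {
			counter = 0;
			cond_resched();
		}
		spin_lock_irqsave(&workq_lock, flags);
	}
	spin_unlock_irqrestore(&workq_lock, flags);
}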
{
unsigned long flags;
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
conn->rac_my_connstamp = kranal_data.kra_connstamp++;
conn->rac_cqid = kranal_data.kra_next_cqid++;
} while (kranal_cqid2conn_locked(conn->rac_cqid) != NULL);
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
}
int
CFS_INIT_LIST_HEAD(&conn->rac_fmaq);
CFS_INIT_LIST_HEAD(&conn->rac_rdmaq);
CFS_INIT_LIST_HEAD(&conn->rac_replyq);
- cfs_spin_lock_init(&conn->rac_lock);
+ spin_lock_init(&conn->rac_lock);
kranal_set_conn_uniqueness(conn);
unsigned long flags;
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
if (conn->rac_state == RANAL_CONN_ESTABLISHED)
kranal_close_conn_locked(conn, error);
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
}
int
/* Schedule conn on rad_new_conns */
kranal_conn_addref(conn);
- cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+ spin_lock_irqsave(&dev->rad_lock, flags);
cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns);
cfs_waitq_signal(&dev->rad_waitq);
- cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
+ spin_unlock_irqrestore(&dev->rad_lock, flags);
rrc = RapkWaitToConnect(conn->rac_rihandle);
if (rrc != RAP_SUCCESS) {
if (rc != 0)
return rc;
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
if (!kranal_peer_active(peer)) {
/* raced with peer getting unlinked */
- cfs_write_unlock_irqrestore(&kranal_data. \
+ write_unlock_irqrestore(&kranal_data. \
kra_global_lock,
flags);
kranal_conn_decref(conn);
return -ENOMEM;
}
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
peer2 = kranal_find_peer_locked(peer_nid);
if (peer2 == NULL) {
* this while holding the global lock, to synch with connection
* destruction on NID change. */
if (kranal_data.kra_ni->ni_nid != dst_nid) {
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ write_unlock_irqrestore(&kranal_data.kra_global_lock,
flags);
CERROR("Stale/bad connection with %s: dst_nid %s, expected %s\n",
if (rc != 0) {
LASSERT (!cfs_list_empty(&peer->rap_conns));
LASSERT (cfs_list_empty(&peer->rap_tx_queue));
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ write_unlock_irqrestore(&kranal_data.kra_global_lock,
flags);
CWARN("Not creating duplicate connection to %s: %d\n",
libcfs_nid2str(peer_nid), rc);
nstale = kranal_close_stale_conns_locked(peer, conn);
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* CAVEAT EMPTOR: passive peer can disappear NOW */
CDEBUG(D_NET, "Done handshake %s:%d \n",
libcfs_nid2str(peer->rap_nid), rc);
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
LASSERT (peer->rap_connecting);
peer->rap_connecting = 0;
peer->rap_reconnect_interval = 0; /* OK to reconnect at any time */
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ write_unlock_irqrestore(&kranal_data.kra_global_lock,
flags);
return;
}
cfs_list_add(&zombies, &peer->rap_tx_queue);
cfs_list_del_init(&peer->rap_tx_queue);
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
if (cfs_list_empty(&zombies))
return;
ras->ras_sock = sock;
- cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+ spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
cfs_list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq);
cfs_waitq_signal(&kranal_data.kra_connd_waitq);
- cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+ spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
return 0;
}
peer->rap_reconnect_interval = 0; /* OK to connect at any time */
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
if (kranal_data.kra_nonewpeers) {
/* shutdown has started already */
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ write_unlock_irqrestore(&kranal_data.kra_global_lock,
flags);
LIBCFS_FREE(peer, sizeof(*peer));
cfs_atomic_inc(&kranal_data.kra_npeers);
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
*peerp = peer;
return 0;
{
kra_peer_t *peer;
- cfs_read_lock(&kranal_data.kra_global_lock);
+ read_lock(&kranal_data.kra_global_lock);
peer = kranal_find_peer_locked(nid);
if (peer != NULL) /* +1 ref for caller? */
kranal_peer_addref(peer);
- cfs_read_unlock(&kranal_data.kra_global_lock);
+ read_unlock(&kranal_data.kra_global_lock);
return peer;
}
cfs_list_t *ptmp;
int i;
- cfs_read_lock(&kranal_data.kra_global_lock);
+ read_lock(&kranal_data.kra_global_lock);
for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
*portp = peer->rap_port;
*persistencep = peer->rap_persistence;
- cfs_read_unlock(&kranal_data.kra_global_lock);
+ read_unlock(&kranal_data.kra_global_lock);
return 0;
}
}
- cfs_read_unlock(&kranal_data.kra_global_lock);
+ read_unlock(&kranal_data.kra_global_lock);
return -ENOENT;
}
if (rc != 0)
return rc;
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
peer2 = kranal_find_peer_locked(nid);
if (peer2 != NULL) {
peer->rap_port = port;
peer->rap_persistence++;
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
return 0;
}
int i;
int rc = -ENOENT;
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
if (nid != LNET_NID_ANY)
lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
}
}
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
return rc;
}
cfs_list_t *ctmp;
int i;
- cfs_read_lock (&kranal_data.kra_global_lock);
+ read_lock(&kranal_data.kra_global_lock);
for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
cfs_list_for_each (ptmp, &kranal_data.kra_peers[i]) {
libcfs_nid2str(conn->rac_peer->rap_nid),
cfs_atomic_read(&conn->rac_refcount));
cfs_atomic_inc(&conn->rac_refcount);
- cfs_read_unlock(&kranal_data.kra_global_lock);
+ read_unlock(&kranal_data.kra_global_lock);
return conn;
}
}
}
- cfs_read_unlock(&kranal_data.kra_global_lock);
+ read_unlock(&kranal_data.kra_global_lock);
return NULL;
}
int i;
int count = 0;
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
if (nid != LNET_NID_ANY)
lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
}
}
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+ write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
/* wildcards always succeed */
if (nid == LNET_NID_ANY)
case RANAL_INIT_ALL:
/* Prevent new peers from being created */
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
kranal_data.kra_nonewpeers = 1;
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ write_unlock_irqrestore(&kranal_data.kra_global_lock,
flags);
/* Remove all existing peers from the peer table */
/* Wait for pending conn reqs to be handled */
i = 2;
- cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+ spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
while (!cfs_list_empty(&kranal_data.kra_connd_acceptq)) {
- cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
+ spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
flags);
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
"waiting for conn reqs to clean up\n");
cfs_pause(cfs_time_seconds(1));
- cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock,
+ spin_lock_irqsave(&kranal_data.kra_connd_lock,
flags);
}
- cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+ spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
/* Wait for all peers to be freed */
i = 2;
for (i = 0; i < kranal_data.kra_ndevs; i++) {
kra_device_t *dev = &kranal_data.kra_devices[i];
- cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+ spin_lock_irqsave(&dev->rad_lock, flags);
cfs_waitq_signal(&dev->rad_waitq);
- cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
+ spin_unlock_irqrestore(&dev->rad_lock, flags);
}
- cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
+ spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
cfs_waitq_broadcast(&kranal_data.kra_reaper_waitq);
- cfs_spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
+ spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
LASSERT (cfs_list_empty(&kranal_data.kra_connd_peers));
- cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+ spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
cfs_waitq_broadcast(&kranal_data.kra_connd_waitq);
- cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+ spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
/* Wait for threads to exit */
i = 2;
kranal_data.kra_connstamp =
kranal_data.kra_peerstamp = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
- cfs_rwlock_init(&kranal_data.kra_global_lock);
+ rwlock_init(&kranal_data.kra_global_lock);
for (i = 0; i < RANAL_MAXDEVS; i++ ) {
kra_device_t *dev = &kranal_data.kra_devices[i];
CFS_INIT_LIST_HEAD(&dev->rad_ready_conns);
CFS_INIT_LIST_HEAD(&dev->rad_new_conns);
cfs_waitq_init(&dev->rad_waitq);
- cfs_spin_lock_init(&dev->rad_lock);
+ spin_lock_init(&dev->rad_lock);
}
kranal_data.kra_new_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
cfs_waitq_init(&kranal_data.kra_reaper_waitq);
- cfs_spin_lock_init(&kranal_data.kra_reaper_lock);
+ spin_lock_init(&kranal_data.kra_reaper_lock);
CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
cfs_waitq_init(&kranal_data.kra_connd_waitq);
- cfs_spin_lock_init(&kranal_data.kra_connd_lock);
+ spin_lock_init(&kranal_data.kra_connd_lock);
CFS_INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
- cfs_spin_lock_init(&kranal_data.kra_tx_lock);
+ spin_lock_init(&kranal_data.kra_tx_lock);
/* OK to call kranal_api_shutdown() to cleanup now */
kranal_data.kra_init = RANAL_INIT_DATA;
cfs_list_t rad_ready_conns;/* connections ready to tx/rx */
cfs_list_t rad_new_conns; /* new connections to complete */
cfs_waitq_t rad_waitq; /* scheduler waits here */
- cfs_spinlock_t rad_lock; /* serialise */
+ spinlock_t rad_lock; /* serialise */
void *rad_scheduler; /* scheduling thread */
unsigned int rad_nphysmap; /* # phys mappings */
unsigned int rad_nppphysmap;/* # phys pages mapped */
kra_device_t kra_devices[RANAL_MAXDEVS]; /* device/ptag/cq */
int kra_ndevs; /* # devices */
- cfs_rwlock_t kra_global_lock; /* stabilize peer/conn ops */
+ rwlock_t kra_global_lock; /* stabilize peer/conn ops */
cfs_list_t *kra_peers; /* hash table of all my known peers */
int kra_peer_hash_size; /* size of kra_peers */
long kra_new_min_timeout; /* minimum timeout on any new conn */
cfs_waitq_t kra_reaper_waitq; /* reaper sleeps here */
- cfs_spinlock_t kra_reaper_lock; /* serialise */
+ spinlock_t kra_reaper_lock; /* serialise */
cfs_list_t kra_connd_peers; /* peers waiting for a connection */
cfs_list_t kra_connd_acceptq; /* accepted sockets to handshake */
cfs_waitq_t kra_connd_waitq; /* connection daemons sleep here */
- cfs_spinlock_t kra_connd_lock; /* serialise */
+ spinlock_t kra_connd_lock; /* serialise */
cfs_list_t kra_idle_txs; /* idle tx descriptors */
__u64 kra_next_tx_cookie; /* RDMA completion cookie */
- cfs_spinlock_t kra_tx_lock; /* serialise */
+ spinlock_t kra_tx_lock; /* serialise */
} kra_data_t;
#define RANAL_INIT_NOTHING 0
unsigned int rac_close_recvd; /* I've received CLOSE */
unsigned int rac_state; /* connection state */
unsigned int rac_scheduled; /* being attended to */
- cfs_spinlock_t rac_lock; /* serialise */
+ spinlock_t rac_lock; /* serialise */
kra_device_t *rac_device; /* which device */
RAP_PVOID rac_rihandle; /* RA endpoint */
kra_msg_t *rac_rxmsg; /* incoming message (FMA prefix) */
if (dev->rad_id != devid)
continue;
- cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+ spin_lock_irqsave(&dev->rad_lock, flags);
if (!dev->rad_ready) {
dev->rad_ready = 1;
cfs_waitq_signal(&dev->rad_waitq);
}
- cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
+ spin_unlock_irqrestore(&dev->rad_lock, flags);
return;
}
kra_device_t *dev = conn->rac_device;
unsigned long flags;
- cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+ spin_lock_irqsave(&dev->rad_lock, flags);
if (!conn->rac_scheduled) {
kranal_conn_addref(conn); /* +1 ref for scheduler */
cfs_waitq_signal(&dev->rad_waitq);
}
- cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
+ spin_unlock_irqrestore(&dev->rad_lock, flags);
}
kra_tx_t *
unsigned long flags;
kra_tx_t *tx;
- cfs_spin_lock_irqsave(&kranal_data.kra_tx_lock, flags);
+ spin_lock_irqsave(&kranal_data.kra_tx_lock, flags);
if (cfs_list_empty(&kranal_data.kra_idle_txs)) {
- cfs_spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
+ spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
return NULL;
}
* got a lock right now... */
tx->tx_cookie = kranal_data.kra_next_tx_cookie++;
- cfs_spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
+ spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
LASSERT (tx->tx_buftype == RANAL_BUF_NONE);
LASSERT (tx->tx_msg.ram_type == RANAL_MSG_NONE);
tx->tx_msg.ram_type = RANAL_MSG_NONE;
tx->tx_conn = NULL;
- cfs_spin_lock_irqsave(&kranal_data.kra_tx_lock, flags);
+ spin_lock_irqsave(&kranal_data.kra_tx_lock, flags);
cfs_list_add_tail(&tx->tx_list, &kranal_data.kra_idle_txs);
- cfs_spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
+ spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
/* finalize AFTER freeing lnet msgs */
for (i = 0; i < 2; i++) {
tx->tx_conn = conn;
- cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+ spin_lock_irqsave(&conn->rac_lock, flags);
cfs_list_add_tail(&tx->tx_list, &conn->rac_fmaq);
tx->tx_qtime = jiffies;
- cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+ spin_unlock_irqrestore(&conn->rac_lock, flags);
kranal_schedule_conn(conn);
}
kra_conn_t *conn;
int rc;
int retry;
- cfs_rwlock_t *g_lock = &kranal_data.kra_global_lock;
+ rwlock_t *g_lock = &kranal_data.kra_global_lock;
/* If I get here, I've committed to send, so I complete the tx with
* failure on any problems */
for (retry = 0; ; retry = 1) {
- cfs_read_lock(g_lock);
+ read_lock(g_lock);
peer = kranal_find_peer_locked(nid);
if (peer != NULL) {
conn = kranal_find_conn_locked(peer);
if (conn != NULL) {
kranal_post_fma(conn, tx);
- cfs_read_unlock(g_lock);
+ read_unlock(g_lock);
return;
}
}
/* Making connections; I'll need a write lock... */
- cfs_read_unlock(g_lock);
- cfs_write_lock_irqsave(g_lock, flags);
+ read_unlock(g_lock);
+ write_lock_irqsave(g_lock, flags);
peer = kranal_find_peer_locked(nid);
if (peer != NULL)
break;
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
if (retry) {
CERROR("Can't find peer %s\n", libcfs_nid2str(nid));
if (conn != NULL) {
/* Connection exists; queue message on it */
kranal_post_fma(conn, tx);
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
return;
}
if (!(peer->rap_reconnect_interval == 0 || /* first attempt */
cfs_time_aftereq(jiffies, peer->rap_reconnect_time))) {
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
kranal_tx_done(tx, -EHOSTUNREACH);
return;
}
peer->rap_connecting = 1;
kranal_peer_addref(peer); /* extra ref for connd */
- cfs_spin_lock(&kranal_data.kra_connd_lock);
+ spin_lock(&kranal_data.kra_connd_lock);
cfs_list_add_tail(&peer->rap_connd_list,
&kranal_data.kra_connd_peers);
cfs_waitq_signal(&kranal_data.kra_connd_waitq);
- cfs_spin_unlock(&kranal_data.kra_connd_lock);
+ spin_unlock(&kranal_data.kra_connd_lock);
}
/* A connection is being established; queue the message... */
cfs_list_add_tail(&tx->tx_list, &peer->rap_tx_queue);
- cfs_write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
}
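/* A minimal sketch (not the kranal code itself) of the read-then-write
 * rwlock sequence used above: look the peer up under the shared (read)
 * side, and only take the exclusive, irq-safe write side when the table
 * may need to change, re-checking after the lock is upgraded.
 * peer_lock, peer_list and struct peer are placeholders, not the
 * kranal names. */
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct peer {
	struct list_head p_list;
	u64		 p_nid;
};

static DEFINE_RWLOCK(peer_lock);
static LIST_HEAD(peer_list);

static struct peer *find_peer_locked(u64 nid)
{
	struct peer *p;

	list_for_each_entry(p, &peer_list, p_list)
		if (p->p_nid == nid)
			return p;
	return NULL;
}

static bool add_peer_unless_present(u64 nid, struct peer *new_peer)
{
	unsigned long flags;
	bool added = false;

	/* fast path: a shared lock is enough for a pure lookup */
	read_lock(&peer_lock);
	if (find_peer_locked(nid) != NULL) {
		read_unlock(&peer_lock);
		return false;
	}
	read_unlock(&peer_lock);

	/* slow path: re-check under the write lock, since another thread
	 * may have added the peer after the read lock was dropped */
	write_lock_irqsave(&peer_lock, flags);
	if (find_peer_locked(nid) == NULL) {
		new_peer->p_nid = nid;
		list_add_tail(&new_peer->p_list, &peer_list);
		added = true;
	}
	write_unlock_irqrestore(&peer_lock, flags);
	return added;
}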
void
rrc = RapkPostRdma(conn->rac_rihandle, &tx->tx_rdma_desc);
LASSERT (rrc == RAP_SUCCESS);
- cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+ spin_lock_irqsave(&conn->rac_lock, flags);
cfs_list_add_tail(&tx->tx_list, &conn->rac_rdmaq);
tx->tx_qtime = jiffies;
- cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+ spin_unlock_irqrestore(&conn->rac_lock, flags);
}
int
* in case of hardware/software errors that make this conn seem
* responsive even though it isn't progressing its message queues. */
- cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+ spin_lock_irqsave(&conn->rac_lock, flags);
cfs_list_for_each (ttmp, &conn->rac_fmaq) {
tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
if (cfs_time_aftereq(now, tx->tx_qtime + timeout)) {
- cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+ spin_unlock_irqrestore(&conn->rac_lock, flags);
CERROR("tx on fmaq for %s blocked %lu seconds\n",
libcfs_nid2str(conn->rac_peer->rap_nid),
(now - tx->tx_qtime)/CFS_HZ);
tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
if (cfs_time_aftereq(now, tx->tx_qtime + timeout)) {
- cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+ spin_unlock_irqrestore(&conn->rac_lock, flags);
CERROR("tx on rdmaq for %s blocked %lu seconds\n",
libcfs_nid2str(conn->rac_peer->rap_nid),
(now - tx->tx_qtime)/CFS_HZ);
tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
if (cfs_time_aftereq(now, tx->tx_qtime + timeout)) {
- cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+ spin_unlock_irqrestore(&conn->rac_lock, flags);
CERROR("tx on replyq for %s blocked %lu seconds\n",
libcfs_nid2str(conn->rac_peer->rap_nid),
(now - tx->tx_qtime)/CFS_HZ);
}
}
- cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+ spin_unlock_irqrestore(&conn->rac_lock, flags);
return 0;
}
again:
/* NB. We expect to check all the conns and not find any problems, so
* we just use a shared lock while we take a look... */
- cfs_read_lock(&kranal_data.kra_global_lock);
+ read_lock(&kranal_data.kra_global_lock);
cfs_list_for_each (ctmp, conns) {
conn = cfs_list_entry(ctmp, kra_conn_t, rac_hashlist);
continue;
kranal_conn_addref(conn);
- cfs_read_unlock(&kranal_data.kra_global_lock);
+ read_unlock(&kranal_data.kra_global_lock);
CERROR("Conn to %s, cqid %d timed out\n",
libcfs_nid2str(conn->rac_peer->rap_nid),
conn->rac_cqid);
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
switch (conn->rac_state) {
default:
break;
}
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ write_unlock_irqrestore(&kranal_data.kra_global_lock,
flags);
kranal_conn_decref(conn);
goto again;
}
- cfs_read_unlock(&kranal_data.kra_global_lock);
+ read_unlock(&kranal_data.kra_global_lock);
}
int
cfs_waitlink_init(&wait);
- cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+ spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
while (!kranal_data.kra_shutdown) {
did_something = 0;
kra_acceptsock_t, ras_list);
cfs_list_del(&ras->ras_list);
- cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
+ spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
flags);
CDEBUG(D_NET,"About to handshake someone\n");
CDEBUG(D_NET,"Finished handshaking someone\n");
- cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock,
+ spin_lock_irqsave(&kranal_data.kra_connd_lock,
flags);
did_something = 1;
}
kra_peer_t, rap_connd_list);
cfs_list_del_init(&peer->rap_connd_list);
- cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
+ spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
flags);
kranal_connect(peer);
kranal_peer_decref(peer);
- cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock,
+ spin_lock_irqsave(&kranal_data.kra_connd_lock,
flags);
did_something = 1;
}
cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
cfs_waitq_add_exclusive(&kranal_data.kra_connd_waitq, &wait);
- cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+ spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
cfs_set_current_state(CFS_TASK_RUNNING);
cfs_waitq_del(&kranal_data.kra_connd_waitq, &wait);
- cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+ spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
}
- cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+ spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
kranal_thread_fini();
return 0;
LASSERT (timeout > 0);
- cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
+ spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
if (timeout < kranal_data.kra_new_min_timeout)
kranal_data.kra_new_min_timeout = timeout;
- cfs_spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
+ spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
}
int
cfs_waitlink_init(&wait);
- cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
+ spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
while (!kranal_data.kra_shutdown) {
/* I wake up every 'p' seconds to check for timeouts on some
cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
cfs_waitq_add(&kranal_data.kra_reaper_waitq, &wait);
- cfs_spin_unlock_irqrestore(&kranal_data.kra_reaper_lock,
+ spin_unlock_irqrestore(&kranal_data.kra_reaper_lock,
flags);
cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
timeout);
- cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock,
+ spin_lock_irqsave(&kranal_data.kra_reaper_lock,
flags);
cfs_set_current_state(CFS_TASK_RUNNING);
}
min_timeout = current_min_timeout;
- cfs_spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
+ spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
LASSERT (min_timeout > 0);
next_check_time += p * CFS_HZ;
- cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
+ spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
if (((conn_index - chunk <= base_index &&
base_index < conn_index) ||
LASSERT (rrc == RAP_SUCCESS);
LASSERT ((event_type & RAPK_CQ_EVENT_OVERRUN) == 0);
- cfs_read_lock(&kranal_data.kra_global_lock);
+ read_lock(&kranal_data.kra_global_lock);
conn = kranal_cqid2conn_locked(cqid);
if (conn == NULL) {
/* Conn was destroyed? */
CDEBUG(D_NET, "RDMA CQID lookup %d failed\n", cqid);
- cfs_read_unlock(&kranal_data.kra_global_lock);
+ read_unlock(&kranal_data.kra_global_lock);
continue;
}
CDEBUG(D_NET, "Completed %p\n",
cfs_list_entry(conn->rac_rdmaq.next, kra_tx_t, tx_list));
- cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+ spin_lock_irqsave(&conn->rac_lock, flags);
LASSERT (!cfs_list_empty(&conn->rac_rdmaq));
tx = cfs_list_entry(conn->rac_rdmaq.next, kra_tx_t, tx_list);
cfs_list_add_tail(&tx->tx_list, &conn->rac_fmaq);
tx->tx_qtime = jiffies;
- cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+ spin_unlock_irqrestore(&conn->rac_lock, flags);
/* Get conn's fmaq processed, now I've just put something
* there */
kranal_schedule_conn(conn);
- cfs_read_unlock(&kranal_data.kra_global_lock);
+ read_unlock(&kranal_data.kra_global_lock);
}
}
if ((event_type & RAPK_CQ_EVENT_OVERRUN) == 0) {
- cfs_read_lock(&kranal_data.kra_global_lock);
+ read_lock(&kranal_data.kra_global_lock);
conn = kranal_cqid2conn_locked(cqid);
if (conn == NULL) {
kranal_schedule_conn(conn);
}
- cfs_read_unlock(&kranal_data.kra_global_lock);
+ read_unlock(&kranal_data.kra_global_lock);
continue;
}
for (i = 0; i < kranal_data.kra_conn_hash_size; i++) {
- cfs_read_lock(&kranal_data.kra_global_lock);
+ read_lock(&kranal_data.kra_global_lock);
conns = &kranal_data.kra_conns[i];
}
/* don't block write lockers for too long... */
- cfs_read_unlock(&kranal_data.kra_global_lock);
+ read_unlock(&kranal_data.kra_global_lock);
}
}
}
if (!conn->rac_close_recvd)
return;
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
if (conn->rac_state == RANAL_CONN_CLOSING)
kranal_terminate_conn_locked(conn);
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ write_unlock_irqrestore(&kranal_data.kra_global_lock,
flags);
return;
}
- cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+ spin_lock_irqsave(&conn->rac_lock, flags);
if (cfs_list_empty(&conn->rac_fmaq)) {
- cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+ spin_unlock_irqrestore(&conn->rac_lock, flags);
if (cfs_time_aftereq(jiffies,
conn->rac_last_tx + conn->rac_keepalive *
cfs_list_del(&tx->tx_list);
more_to_do = !cfs_list_empty(&conn->rac_fmaq);
- cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+ spin_unlock_irqrestore(&conn->rac_lock, flags);
expect_reply = 0;
CDEBUG(D_NET, "sending regular msg: %p, type %02x, cookie "LPX64"\n",
/* I need credits to send this. Replace tx at the head of the
* fmaq and I'll get rescheduled when credits appear */
CDEBUG(D_NET, "EAGAIN on %p\n", conn);
- cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+ spin_lock_irqsave(&conn->rac_lock, flags);
cfs_list_add(&tx->tx_list, &conn->rac_fmaq);
- cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+ spin_unlock_irqrestore(&conn->rac_lock, flags);
return;
}
} else {
/* LASSERT(current) above ensures this doesn't race with reply
* processing */
- cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+ spin_lock_irqsave(&conn->rac_lock, flags);
cfs_list_add_tail(&tx->tx_list, &conn->rac_replyq);
tx->tx_qtime = jiffies;
- cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+ spin_unlock_irqrestore(&conn->rac_lock, flags);
}
if (more_to_do) {
kra_tx_t *tx;
unsigned long flags;
- cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+ spin_lock_irqsave(&conn->rac_lock, flags);
cfs_list_for_each(ttmp, &conn->rac_replyq) {
tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
continue;
if (tx->tx_msg.ram_type != type) {
- cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+ spin_unlock_irqrestore(&conn->rac_lock, flags);
CWARN("Unexpected type %x (%x expected) "
"matched reply from %s\n",
tx->tx_msg.ram_type, type,
}
cfs_list_del(&tx->tx_list);
- cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+ spin_unlock_irqrestore(&conn->rac_lock, flags);
return tx;
}
- cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+ spin_unlock_irqrestore(&conn->rac_lock, flags);
CWARN("Unmatched reply %02x/"LPX64" from %s\n",
type, cookie, libcfs_nid2str(conn->rac_peer->rap_nid));
return NULL;
if (msg->ram_type == RANAL_MSG_CLOSE) {
CWARN("RX CLOSE from %s\n", libcfs_nid2str(conn->rac_peer->rap_nid));
conn->rac_close_recvd = 1;
- cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+ write_lock_irqsave(&kranal_data.kra_global_lock, flags);
if (conn->rac_state == RANAL_CONN_ESTABLISHED)
kranal_close_conn_locked(conn, 0);
conn->rac_close_sent)
kranal_terminate_conn_locked(conn);
- cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+ write_unlock_irqrestore(&kranal_data.kra_global_lock,
flags);
goto out;
}
dev->rad_scheduler = current;
cfs_waitlink_init(&wait);
- cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+ spin_lock_irqsave(&dev->rad_lock, flags);
while (!kranal_data.kra_shutdown) {
/* Safe: kra_shutdown only set when quiescent */
if (busy_loops++ >= RANAL_RESCHED) {
- cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
+ spin_unlock_irqrestore(&dev->rad_lock, flags);
cfs_cond_resched();
busy_loops = 0;
- cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+ spin_lock_irqsave(&dev->rad_lock, flags);
}
dropped_lock = 0;
if (dev->rad_ready) {
/* Device callback fired since I last checked it */
dev->rad_ready = 0;
- cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
+ spin_unlock_irqrestore(&dev->rad_lock, flags);
dropped_lock = 1;
kranal_check_rdma_cq(dev);
kranal_check_fma_cq(dev);
- cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+ spin_lock_irqsave(&dev->rad_lock, flags);
}
cfs_list_for_each_safe(tmp, nxt, &dev->rad_ready_conns) {
cfs_list_del_init(&conn->rac_schedlist);
LASSERT (conn->rac_scheduled);
conn->rac_scheduled = 0;
- cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
+ spin_unlock_irqrestore(&dev->rad_lock, flags);
dropped_lock = 1;
kranal_check_fma_rx(conn);
kranal_complete_closed_conn(conn);
kranal_conn_decref(conn);
- cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+ spin_lock_irqsave(&dev->rad_lock, flags);
}
nsoonest = 0;
deadline = conn->rac_last_tx + conn->rac_keepalive;
if (cfs_time_aftereq(jiffies, deadline)) {
/* Time to process this new conn */
- cfs_spin_unlock_irqrestore(&dev->rad_lock,
+ spin_unlock_irqrestore(&dev->rad_lock,
flags);
dropped_lock = 1;
rc = kranal_process_new_conn(conn);
if (rc != -EAGAIN) {
/* All done with this conn */
- cfs_spin_lock_irqsave(&dev->rad_lock,
+ spin_lock_irqsave(&dev->rad_lock,
flags);
cfs_list_del_init(&conn->rac_schedlist);
- cfs_spin_unlock_irqrestore(&dev-> \
+ spin_unlock_irqrestore(&dev-> \
rad_lock,
flags);
kranal_conn_decref(conn);
- cfs_spin_lock_irqsave(&dev->rad_lock,
+ spin_lock_irqsave(&dev->rad_lock,
flags);
continue;
}
conn->rac_keepalive += CFS_HZ;
deadline = conn->rac_last_tx + conn->rac_keepalive;
- cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+ spin_lock_irqsave(&dev->rad_lock, flags);
}
/* Does this conn need attention soonest? */
cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
cfs_waitq_add_exclusive(&dev->rad_waitq, &wait);
- cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
+ spin_unlock_irqrestore(&dev->rad_lock, flags);
if (nsoonest == 0) {
busy_loops = 0;
cfs_waitq_del(&dev->rad_waitq, &wait);
cfs_set_current_state(CFS_TASK_RUNNING);
- cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+ spin_lock_irqsave(&dev->rad_lock, flags);
}
- cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
+ spin_unlock_irqrestore(&dev->rad_lock, flags);
dev->rad_scheduler = NULL;
kranal_thread_fini();
CFS_INIT_LIST_HEAD (&peer->ksnp_routes);
CFS_INIT_LIST_HEAD (&peer->ksnp_tx_queue);
CFS_INIT_LIST_HEAD (&peer->ksnp_zc_req_list);
- cfs_spin_lock_init(&peer->ksnp_lock);
+ spin_lock_init(&peer->ksnp_lock);
- cfs_spin_lock_bh (&net->ksnn_lock);
+ spin_lock_bh(&net->ksnn_lock);
- if (net->ksnn_shutdown) {
- cfs_spin_unlock_bh (&net->ksnn_lock);
+ if (net->ksnn_shutdown) {
+ spin_unlock_bh(&net->ksnn_lock);
- LIBCFS_FREE(peer, sizeof(*peer));
- CERROR("Can't create peer: network shutdown\n");
- return -ESHUTDOWN;
- }
+ LIBCFS_FREE(peer, sizeof(*peer));
+ CERROR("Can't create peer: network shutdown\n");
+ return -ESHUTDOWN;
+ }
- net->ksnn_npeers++;
+ net->ksnn_npeers++;
- cfs_spin_unlock_bh (&net->ksnn_lock);
+ spin_unlock_bh(&net->ksnn_lock);
- *peerp = peer;
- return 0;
+ *peerp = peer;
+ return 0;
}
void
* until they are destroyed, so we can be assured that _all_ state to
* do with this peer has been cleaned up when its refcount drops to
* zero. */
- cfs_spin_lock_bh (&net->ksnn_lock);
- net->ksnn_npeers--;
- cfs_spin_unlock_bh (&net->ksnn_lock);
+ spin_lock_bh(&net->ksnn_lock);
+ net->ksnn_npeers--;
+ spin_unlock_bh(&net->ksnn_lock);
}
ksock_peer_t *
{
ksock_peer_t *peer;
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
- peer = ksocknal_find_peer_locked (ni, id);
- if (peer != NULL) /* +1 ref for caller? */
- ksocknal_peer_addref(peer);
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
+ peer = ksocknal_find_peer_locked(ni, id);
+ if (peer != NULL) /* +1 ref for caller? */
+ ksocknal_peer_addref(peer);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
return (peer);
}
int j;
int rc = -ENOENT;
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
}
}
out:
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
return (rc);
}
return (-ENOMEM);
}
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
/* always called with a ref on ni, so shutdown can't have started */
LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
route2->ksnr_share_count++;
}
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
return (0);
}
int i;
int rc = -ENOENT;
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
if (id.nid != LNET_NID_ANY)
lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
}
}
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_txlist_done(ni, &zombies, 1);
cfs_list_t *ctmp;
int i;
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
cfs_list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
conn = cfs_list_entry (ctmp, ksock_conn_t,
ksnc_list);
ksocknal_conn_addref(conn);
- cfs_read_unlock (&ksocknal_data. \
+ read_unlock(&ksocknal_data. \
ksnd_global_lock);
return (conn);
}
}
}
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
return (NULL);
}
int i;
int nip;
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
nip = net->ksnn_ninterfaces;
LASSERT (nip <= LNET_MAX_INTERFACES);
/* Only offer interfaces for additional connections if I have
* more than one. */
if (nip < 2) {
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
return 0;
}
LASSERT (ipaddrs[i] != 0);
}
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
return (nip);
}
int
ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
{
- cfs_rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+ rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
ksock_net_t *net = peer->ksnp_ni->ni_data;
ksock_interface_t *iface;
ksock_interface_t *best_iface;
/* Also note that I'm not going to return more than n_peerips
* interfaces, even if I have more myself */
- cfs_write_lock_bh (global_lock);
+ write_lock_bh(global_lock);
LASSERT (n_peerips <= LNET_MAX_INTERFACES);
LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
/* Overwrite input peer IP addresses */
memcpy(peerips, peer->ksnp_passive_ips, n_ips * sizeof(*peerips));
- cfs_write_unlock_bh (global_lock);
+ write_unlock_bh(global_lock);
return (n_ips);
}
__u32 *peer_ipaddrs, int npeer_ipaddrs)
{
ksock_route_t *newroute = NULL;
- cfs_rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+ rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
lnet_ni_t *ni = peer->ksnp_ni;
ksock_net_t *net = ni->ni_data;
cfs_list_t *rtmp;
* expecting to be dealing with small numbers of interfaces, so the
* O(n**3)-ness here shouldn't matter */
- cfs_write_lock_bh (global_lock);
+ write_lock_bh(global_lock);
if (net->ksnn_ninterfaces < 2) {
/* Only create additional connections
* if I have > 1 interface */
- cfs_write_unlock_bh (global_lock);
+ write_unlock_bh(global_lock);
return;
}
if (newroute != NULL) {
newroute->ksnr_ipaddr = peer_ipaddrs[i];
} else {
- cfs_write_unlock_bh (global_lock);
+ write_unlock_bh(global_lock);
newroute = ksocknal_create_route(peer_ipaddrs[i], port);
if (newroute == NULL)
return;
- cfs_write_lock_bh (global_lock);
+ write_lock_bh(global_lock);
}
if (peer->ksnp_closing) {
newroute = NULL;
}
- cfs_write_unlock_bh (global_lock);
+ write_unlock_bh(global_lock);
if (newroute != NULL)
ksocknal_route_decref(newroute);
}
cr->ksncr_ni = ni;
cr->ksncr_sock = sock;
- cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+ spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
cfs_list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+ spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
return 0;
}
ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
cfs_socket_t *sock, int type)
{
- cfs_rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
+ rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
CFS_LIST_HEAD (zombies);
lnet_process_id_t peerid;
cfs_list_t *tmp;
hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
peerid = peer->ksnp_id;
- cfs_write_lock_bh(global_lock);
+ write_lock_bh(global_lock);
conn->ksnc_proto = peer->ksnp_proto;
- cfs_write_unlock_bh(global_lock);
+ write_unlock_bh(global_lock);
if (conn->ksnc_proto == NULL) {
conn->ksnc_proto = &ksocknal_protocol_v3x;
if (active) {
ksocknal_peer_addref(peer);
- cfs_write_lock_bh (global_lock);
+ write_lock_bh(global_lock);
} else {
rc = ksocknal_create_peer(&peer, ni, peerid);
if (rc != 0)
goto failed_1;
- cfs_write_lock_bh (global_lock);
+ write_lock_bh(global_lock);
/* called with a ref on ni, so shutdown can't have started */
LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
ksocknal_queue_tx_locked (tx, conn);
}
- cfs_write_unlock_bh (global_lock);
+ write_unlock_bh(global_lock);
/* We've now got a new connection. Any errors from here on are just
* like "normal" comms errors and we close the connection normally.
if (rc == 0)
rc = ksocknal_lib_setup_sock(sock);
- cfs_write_lock_bh(global_lock);
+ write_lock_bh(global_lock);
/* NB my callbacks block while I hold ksnd_global_lock */
ksocknal_lib_set_callback(sock, conn);
if (!active)
peer->ksnp_accepting--;
- cfs_write_unlock_bh(global_lock);
+ write_unlock_bh(global_lock);
if (rc != 0) {
- cfs_write_lock_bh(global_lock);
+ write_lock_bh(global_lock);
if (!conn->ksnc_closing) {
/* could be closed by another thread */
ksocknal_close_conn_locked(conn, rc);
}
- cfs_write_unlock_bh(global_lock);
+ write_unlock_bh(global_lock);
} else if (ksocknal_connsock_addref(conn) == 0) {
/* Allow I/O to proceed. */
ksocknal_read_callback(conn);
ksocknal_unlink_peer_locked(peer);
}
- cfs_write_unlock_bh (global_lock);
+ write_unlock_bh(global_lock);
if (warn != NULL) {
if (rc < 0)
ksocknal_send_hello(ni, conn, peerid.nid, hello);
}
- cfs_write_lock_bh(global_lock);
+ write_lock_bh(global_lock);
peer->ksnp_accepting--;
- cfs_write_unlock_bh(global_lock);
+ write_unlock_bh(global_lock);
}
ksocknal_txlist_done(ni, &zombies, 1);
tx_list)
ksocknal_tx_prep(conn, tx);
- cfs_spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
- cfs_list_splice_init(&peer->ksnp_tx_queue,
- &conn->ksnc_tx_queue);
- cfs_spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
+ spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
+ cfs_list_splice_init(&peer->ksnp_tx_queue,
+ &conn->ksnc_tx_queue);
+ spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
}
peer->ksnp_proto = NULL; /* renegotiate protocol version */
}
}
- cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
- cfs_list_add_tail (&conn->ksnc_list,
- &ksocknal_data.ksnd_deathrow_conns);
- cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
+ cfs_list_add_tail(&conn->ksnc_list,
+ &ksocknal_data.ksnd_deathrow_conns);
+ cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}
void
* tell LNET I think the peer is dead if it's to another kernel and
* there are no connections or connection attempts in existence. */
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
cfs_list_empty(&peer->ksnp_conns) &&
last_alive = peer->ksnp_last_alive;
}
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
if (notify)
lnet_notify (peer->ksnp_ni, peer->ksnp_id.nid, 0,
* abort all buffered data */
LASSERT (conn->ksnc_sock == NULL);
- cfs_spin_lock(&peer->ksnp_lock);
+ spin_lock(&peer->ksnp_lock);
cfs_list_for_each_entry_safe_typed(tx, tmp, &peer->ksnp_zc_req_list,
ksock_tx_t, tx_zc_list) {
cfs_list_add(&tx->tx_zc_list, &zlist);
}
- cfs_spin_unlock(&peer->ksnp_lock);
+ spin_unlock(&peer->ksnp_lock);
while (!cfs_list_empty(&zlist)) {
tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_zc_list);
LASSERT(conn->ksnc_closing);
/* wake up the scheduler to "send" all remaining packets to /dev/null */
- cfs_spin_lock_bh (&sched->kss_lock);
+ spin_lock_bh(&sched->kss_lock);
/* a closing conn is always ready to tx */
conn->ksnc_tx_ready = 1;
cfs_waitq_signal (&sched->kss_waitq);
}
- cfs_spin_unlock_bh (&sched->kss_lock);
+ spin_unlock_bh(&sched->kss_lock);
- /* serialise with callbacks */
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ /* serialise with callbacks */
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
peer->ksnp_error = 0; /* avoid multiple notifications */
}
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
if (failed)
ksocknal_peer_failed(peer);
void
ksocknal_queue_zombie_conn (ksock_conn_t *conn)
{
- /* Queue the conn for the reaper to destroy */
+ /* Queue the conn for the reaper to destroy */
- LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) == 0);
- cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+ LASSERT(cfs_atomic_read(&conn->ksnc_conn_refcount) == 0);
+ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
- cfs_list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
- cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
+ cfs_list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
+ cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
}
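/* A minimal sketch (not the ksocknal code itself) of the _bh locking
 * used above: the reaper queue is also touched from socket callbacks
 * that run in softirq context, so the process-context side takes the
 * lock with spin_lock_bh() to keep local softirqs out.  reaper_lock,
 * reaper_zombies, reaper_wait and struct dead_conn are placeholders
 * for the ksnd_* names. */
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>

struct dead_conn {
	struct list_head dc_list;
};

static DEFINE_SPINLOCK(reaper_lock);
static LIST_HEAD(reaper_zombies);
static DECLARE_WAIT_QUEUE_HEAD(reaper_wait);

static void queue_zombie(struct dead_conn *dc)
{
	spin_lock_bh(&reaper_lock);		/* blocks local softirqs */
	list_add_tail(&dc->dc_list, &reaper_zombies);
	spin_unlock_bh(&reaper_lock);

	wake_up(&reaper_wait);
}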
void
__u32 ipaddr = conn->ksnc_ipaddr;
int count;
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
count = ksocknal_close_peer_conns_locked (peer, ipaddr, why);
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
return (count);
}
int i;
int count = 0;
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
if (id.nid != LNET_NID_ANY)
lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
}
}
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
/* wildcards always succeed */
if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
cfs_time_t last_alive = 0;
cfs_time_t now = cfs_time_current();
ksock_peer_t *peer = NULL;
- cfs_rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
+ rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
lnet_process_id_t id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};
- cfs_read_lock(glock);
+ read_lock(glock);
peer = ksocknal_find_peer_locked(ni, id);
if (peer != NULL) {
connect = 0;
}
- cfs_read_unlock(glock);
+ read_unlock(glock);
if (last_alive != 0)
*when = last_alive;
ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
- cfs_write_lock_bh(glock);
+ write_lock_bh(glock);
peer = ksocknal_find_peer_locked(ni, id);
if (peer != NULL)
ksocknal_launch_all_connections_locked(peer);
- cfs_write_unlock_bh(glock);
+ write_unlock_bh(glock);
return;
}
ksock_conn_t *conn;
for (index = 0; ; index++) {
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
i = 0;
conn = NULL;
}
}
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
if (conn == NULL)
break;
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
for (j = 0; ; j++) {
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
index = 0;
peer = NULL;
}
}
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
if (peer != NULL) {
rc = 0;
netmask == 0)
return (-EINVAL);
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
iface = ksocknal_ip2iface(ni, ipaddress);
if (iface != NULL) {
/* NB only new connections will pay attention to the new interface! */
}
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
return (rc);
}
int i;
int j;
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
for (i = 0; i < net->ksnn_ninterfaces; i++) {
this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
}
}
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
return (rc);
}
ksock_net_t *net = ni->ni_data;
ksock_interface_t *iface;
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
rc = -ENOENT;
data->ioc_u32[3] = iface->ksni_nroutes;
}
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
return rc;
}
sizeof (cfs_list_t) *
ksocknal_data.ksnd_peer_hash_size);
- cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
+ spin_lock(&ksocknal_data.ksnd_tx_lock);
- if (!cfs_list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
- cfs_list_t zlist;
- ksock_tx_t *tx;
+ if (!cfs_list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
+ cfs_list_t zlist;
+ ksock_tx_t *tx;
- cfs_list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
- cfs_list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
- cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
+ cfs_list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
+ cfs_list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
+ spin_unlock(&ksocknal_data.ksnd_tx_lock);
- while(!cfs_list_empty(&zlist)) {
- tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_list);
- cfs_list_del(&tx->tx_list);
- LIBCFS_FREE(tx, tx->tx_desc_size);
- }
- } else {
- cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
- }
+ while (!cfs_list_empty(&zlist)) {
+ tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_list);
+ cfs_list_del(&tx->tx_list);
+ LIBCFS_FREE(tx, tx->tx_desc_size);
+ }
+ } else {
+ spin_unlock(&ksocknal_data.ksnd_tx_lock);
+ }
}
void
}
i = 4;
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
while (ksocknal_data.ksnd_nthreads != 0) {
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"waiting for %d threads to terminate\n",
ksocknal_data.ksnd_nthreads);
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
cfs_pause(cfs_time_seconds(1));
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
}
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
ksocknal_free_buffers();
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
CFS_INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
- cfs_rwlock_init(&ksocknal_data.ksnd_global_lock);
+ rwlock_init(&ksocknal_data.ksnd_global_lock);
CFS_INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
- cfs_spin_lock_init (&ksocknal_data.ksnd_reaper_lock);
+ spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_enomem_conns);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_zombie_conns);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_deathrow_conns);
cfs_waitq_init(&ksocknal_data.ksnd_reaper_waitq);
- cfs_spin_lock_init (&ksocknal_data.ksnd_connd_lock);
+ spin_lock_init(&ksocknal_data.ksnd_connd_lock);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_connreqs);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_routes);
cfs_waitq_init(&ksocknal_data.ksnd_connd_waitq);
- cfs_spin_lock_init (&ksocknal_data.ksnd_tx_lock);
+ spin_lock_init(&ksocknal_data.ksnd_tx_lock);
CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_idle_noop_txs);
/* NB memset above zeros whole of ksocknal_data */
sched = &info->ksi_scheds[nthrs - 1];
sched->kss_info = info;
- cfs_spin_lock_init(&sched->kss_lock);
+ spin_lock_init(&sched->kss_lock);
CFS_INIT_LIST_HEAD(&sched->kss_rx_conns);
CFS_INIT_LIST_HEAD(&sched->kss_tx_conns);
CFS_INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
}
for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
- cfs_spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
- ksocknal_data.ksnd_connd_starting++;
- cfs_spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
-
- rc = ksocknal_thread_start (ksocknal_connd,
- (void *)((ulong_ptr_t)i));
- if (rc != 0) {
- cfs_spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
- ksocknal_data.ksnd_connd_starting--;
- cfs_spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
+ spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
+ ksocknal_data.ksnd_connd_starting++;
+ spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
+
+ rc = ksocknal_thread_start(ksocknal_connd,
+ (void *)((ulong_ptr_t)i));
+ if (rc != 0) {
+ spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
+ ksocknal_data.ksnd_connd_starting--;
+ spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
CERROR("Can't spawn socknal connd: %d\n", rc);
goto failed;
}
void
ksocknal_debug_peerhash (lnet_ni_t *ni)
{
- ksock_peer_t *peer = NULL;
- cfs_list_t *tmp;
- int i;
+ ksock_peer_t *peer = NULL;
+ cfs_list_t *tmp;
+ int i;
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
cfs_list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
}
}
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
return;
}
LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
LASSERT(ksocknal_data.ksnd_nnets > 0);
- cfs_spin_lock_bh (&net->ksnn_lock);
- net->ksnn_shutdown = 1; /* prevent new peers */
- cfs_spin_unlock_bh (&net->ksnn_lock);
+ spin_lock_bh(&net->ksnn_lock);
+ net->ksnn_shutdown = 1; /* prevent new peers */
+ spin_unlock_bh(&net->ksnn_lock);
- /* Delete all peers */
- ksocknal_del_peer(ni, anyid, 0);
+ /* Delete all peers */
+ ksocknal_del_peer(ni, anyid, 0);
- /* Wait for all peer state to clean up */
- i = 2;
- cfs_spin_lock_bh (&net->ksnn_lock);
- while (net->ksnn_npeers != 0) {
- cfs_spin_unlock_bh (&net->ksnn_lock);
+ /* Wait for all peer state to clean up */
+ i = 2;
+ spin_lock_bh(&net->ksnn_lock);
+ while (net->ksnn_npeers != 0) {
+ spin_unlock_bh(&net->ksnn_lock);
- i++;
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
- "waiting for %d peers to disconnect\n",
- net->ksnn_npeers);
- cfs_pause(cfs_time_seconds(1));
+ i++;
+ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
+ "waiting for %d peers to disconnect\n",
+ net->ksnn_npeers);
+ cfs_pause(cfs_time_seconds(1));
- ksocknal_debug_peerhash(ni);
+ ksocknal_debug_peerhash(ni);
- cfs_spin_lock_bh (&net->ksnn_lock);
- }
- cfs_spin_unlock_bh (&net->ksnn_lock);
+ spin_lock_bh(&net->ksnn_lock);
+ }
+ spin_unlock_bh(&net->ksnn_lock);
for (i = 0; i < net->ksnn_ninterfaces; i++) {
LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
if (net == NULL)
goto fail_0;
- cfs_spin_lock_init(&net->ksnn_lock);
+ spin_lock_init(&net->ksnn_lock);
net->ksnn_incarnation = ksocknal_new_incarnation();
ni->ni_data = net;
ni->ni_peertimeout = *ksocknal_tunables.ksnd_peertimeout;
typedef struct /* per scheduler state */
{
- cfs_spinlock_t kss_lock; /* serialise */
+ spinlock_t kss_lock; /* serialise */
cfs_list_t kss_rx_conns; /* conn waiting to be read */
/* conn waiting to be written */
cfs_list_t kss_tx_conns;
typedef struct
{
- __u64 ksnn_incarnation; /* my epoch */
- cfs_spinlock_t ksnn_lock; /* serialise */
+ __u64 ksnn_incarnation; /* my epoch */
+ spinlock_t ksnn_lock; /* serialise */
cfs_list_t ksnn_list; /* chain on global list */
- int ksnn_npeers; /* # peers */
- int ksnn_shutdown; /* shutting down? */
- int ksnn_ninterfaces; /* IP interfaces */
- ksock_interface_t ksnn_interfaces[LNET_MAX_INTERFACES];
+ int ksnn_npeers; /* # peers */
+ int ksnn_shutdown; /* shutting down? */
+ int ksnn_ninterfaces; /* IP interfaces */
+ ksock_interface_t ksnn_interfaces[LNET_MAX_INTERFACES];
} ksock_net_t;
/** connd timeout */
int ksnd_nnets; /* # networks set up */
cfs_list_t ksnd_nets; /* list of nets */
/* stabilize peer/conn ops */
- cfs_rwlock_t ksnd_global_lock;
+ rwlock_t ksnd_global_lock;
/* hash table of all my known peers */
cfs_list_t *ksnd_peers;
int ksnd_peer_hash_size; /* size of ksnd_peers */
cfs_list_t ksnd_enomem_conns; /* conns to retry: reaper_lock*/
cfs_waitq_t ksnd_reaper_waitq; /* reaper sleeps here */
cfs_time_t ksnd_reaper_waketime;/* when reaper will wake */
- cfs_spinlock_t ksnd_reaper_lock; /* serialise */
+ spinlock_t ksnd_reaper_lock; /* serialise */
int ksnd_enomem_tx; /* test ENOMEM sender */
int ksnd_stall_tx; /* test sluggish sender */
long ksnd_connd_starting_stamp;
/** # running connd */
unsigned ksnd_connd_running;
- cfs_spinlock_t ksnd_connd_lock; /* serialise */
+ spinlock_t ksnd_connd_lock; /* serialise */
- cfs_list_t ksnd_idle_noop_txs; /* list head for freed noop tx */
- cfs_spinlock_t ksnd_tx_lock; /* serialise, NOT safe in g_lock */
+ cfs_list_t ksnd_idle_noop_txs; /* list head for freed noop tx */
+ spinlock_t ksnd_tx_lock; /* serialise, g_lock unsafe */
} ksock_nal_data_t;
cfs_list_t ksnp_conns; /* all active connections */
cfs_list_t ksnp_routes; /* routes */
cfs_list_t ksnp_tx_queue; /* waiting packets */
- cfs_spinlock_t ksnp_lock; /* serialize, NOT safe in g_lock */
+ spinlock_t ksnp_lock; /* serialize, NOT safe in g_lock */
cfs_list_t ksnp_zc_req_list; /* zero copy requests wait for ACK */
cfs_time_t ksnp_send_keepalive; /* time to send keepalive */
lnet_ni_t *ksnp_ni; /* which network */
{
int rc = -ESHUTDOWN;
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
- if (!conn->ksnc_closing) {
- LASSERT (cfs_atomic_read(&conn->ksnc_sock_refcount) > 0);
- cfs_atomic_inc(&conn->ksnc_sock_refcount);
- rc = 0;
- }
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
+ if (!conn->ksnc_closing) {
+ LASSERT(cfs_atomic_read(&conn->ksnc_sock_refcount) > 0);
+ cfs_atomic_inc(&conn->ksnc_sock_refcount);
+ rc = 0;
+ }
+ read_unlock(&ksocknal_data.ksnd_global_lock);
return (rc);
}
ksock_tx_t *
ksocknal_alloc_tx(int type, int size)
{
- ksock_tx_t *tx = NULL;
+ ksock_tx_t *tx = NULL;
- if (type == KSOCK_MSG_NOOP) {
- LASSERT (size == KSOCK_NOOP_TX_SIZE);
+ if (type == KSOCK_MSG_NOOP) {
+ LASSERT(size == KSOCK_NOOP_TX_SIZE);
- /* searching for a noop tx in free list */
- cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
+ /* searching for a noop tx in free list */
+ spin_lock(&ksocknal_data.ksnd_tx_lock);
- if (!cfs_list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
- tx = cfs_list_entry(ksocknal_data.ksnd_idle_noop_txs. \
- next, ksock_tx_t, tx_list);
- LASSERT(tx->tx_desc_size == size);
- cfs_list_del(&tx->tx_list);
- }
+ if (!cfs_list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
+ tx = cfs_list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
+ ksock_tx_t, tx_list);
+ LASSERT(tx->tx_desc_size == size);
+ cfs_list_del(&tx->tx_list);
+ }
- cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
+ spin_unlock(&ksocknal_data.ksnd_tx_lock);
}
if (tx == NULL)
void
ksocknal_free_tx (ksock_tx_t *tx)
{
- cfs_atomic_dec(&ksocknal_data.ksnd_nactive_txs);
+ cfs_atomic_dec(&ksocknal_data.ksnd_nactive_txs);
- if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
- /* it's a noop tx */
- cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
+ if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
+ /* it's a noop tx */
+ spin_lock(&ksocknal_data.ksnd_tx_lock);
- cfs_list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
+ cfs_list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
- cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
- } else {
- LIBCFS_FREE(tx, tx->tx_desc_size);
- }
+ spin_unlock(&ksocknal_data.ksnd_tx_lock);
+ } else {
+ LIBCFS_FREE(tx, tx->tx_desc_size);
+ }
}
int
ksocknal_tx_addref(tx);
- cfs_spin_lock(&peer->ksnp_lock);
+ spin_lock(&peer->ksnp_lock);
/* ZC_REQ is going to be pinned to the peer */
tx->tx_deadline =
cfs_list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
- cfs_spin_unlock(&peer->ksnp_lock);
+ spin_unlock(&peer->ksnp_lock);
}
static void
ksocknal_uncheck_zc_req(ksock_tx_t *tx)
{
- ksock_peer_t *peer = tx->tx_conn->ksnc_peer;
+ ksock_peer_t *peer = tx->tx_conn->ksnc_peer;
- LASSERT (tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
- LASSERT (tx->tx_zc_capable);
+ LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
+ LASSERT(tx->tx_zc_capable);
- tx->tx_zc_checked = 0;
+ tx->tx_zc_checked = 0;
- cfs_spin_lock(&peer->ksnp_lock);
+ spin_lock(&peer->ksnp_lock);
- if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
- /* Not waiting for an ACK */
- cfs_spin_unlock(&peer->ksnp_lock);
- return;
- }
+ if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
+ /* Not waiting for an ACK */
+ spin_unlock(&peer->ksnp_lock);
+ return;
+ }
- tx->tx_msg.ksm_zc_cookies[0] = 0;
- cfs_list_del(&tx->tx_zc_list);
+ tx->tx_msg.ksm_zc_cookies[0] = 0;
+ cfs_list_del(&tx->tx_zc_list);
- cfs_spin_unlock(&peer->ksnp_lock);
+ spin_unlock(&peer->ksnp_lock);
- ksocknal_tx_decref(tx);
+ ksocknal_tx_decref(tx);
}
int
counter, conn, cfs_atomic_read(&libcfs_kmemory));
/* Queue on ksnd_enomem_conns for retry after a timeout */
- cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
/* enomem list takes over scheduler's ref... */
LASSERT (conn->ksnc_tx_scheduled);
ksocknal_data.ksnd_reaper_waketime))
cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
return (rc);
}
route->ksnr_scheduled = 1; /* scheduling conn for connd */
ksocknal_route_addref(route); /* extra ref for connd */
- cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+ spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
- cfs_list_add_tail (&route->ksnr_connd_list,
- &ksocknal_data.ksnd_connd_routes);
- cfs_waitq_signal (&ksocknal_data.ksnd_connd_waitq);
+ cfs_list_add_tail(&route->ksnr_connd_list,
+ &ksocknal_data.ksnd_connd_routes);
+ cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+ spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
}
void
* but they're used inside spinlocks a lot.
*/
bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);
- cfs_spin_lock_bh (&sched->kss_lock);
+ spin_lock_bh(&sched->kss_lock);
if (cfs_list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
/* First packet starts the timeout */
cfs_waitq_signal (&sched->kss_waitq);
}
- cfs_spin_unlock_bh (&sched->kss_lock);
+ spin_unlock_bh(&sched->kss_lock);
}
{
ksock_peer_t *peer;
ksock_conn_t *conn;
- cfs_rwlock_t *g_lock;
+ rwlock_t *g_lock;
int retry;
int rc;
g_lock = &ksocknal_data.ksnd_global_lock;
for (retry = 0;; retry = 1) {
- cfs_read_lock (g_lock);
+ read_lock(g_lock);
peer = ksocknal_find_peer_locked(ni, id);
if (peer != NULL) {
if (ksocknal_find_connectable_route_locked(peer) == NULL) {
* connecting and I do have an actual
* connection... */
ksocknal_queue_tx_locked (tx, conn);
- cfs_read_unlock (g_lock);
+ read_unlock(g_lock);
return (0);
}
}
}
/* I'll need a write lock... */
- cfs_read_unlock (g_lock);
+ read_unlock(g_lock);
- cfs_write_lock_bh (g_lock);
+ write_lock_bh(g_lock);
peer = ksocknal_find_peer_locked(ni, id);
if (peer != NULL)
break;
- cfs_write_unlock_bh (g_lock);
+ write_unlock_bh(g_lock);
if ((id.pid & LNET_PID_USERFLAG) != 0) {
CERROR("Refusing to create a connection to "
if (conn != NULL) {
/* Connection exists; queue message on it */
ksocknal_queue_tx_locked (tx, conn);
- cfs_write_unlock_bh (g_lock);
+ write_unlock_bh(g_lock);
return (0);
}
/* Queue the message until a connection is established */
cfs_list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
- cfs_write_unlock_bh (g_lock);
+ write_unlock_bh(g_lock);
return 0;
}
- cfs_write_unlock_bh (g_lock);
+ write_unlock_bh(g_lock);
/* NB Routes may be ignored if connections to them failed recently */
CNETERR("No usable routes to %s\n", libcfs_id2str(id));
if (pid < 0)
return ((int)pid);
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_data.ksnd_nthreads++;
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
return (0);
}
void
ksocknal_thread_fini (void)
{
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_data.ksnd_nthreads--;
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
}
int
LASSERT (conn->ksnc_rx_scheduled);
- cfs_spin_lock_bh (&sched->kss_lock);
+ spin_lock_bh(&sched->kss_lock);
switch (conn->ksnc_rx_state) {
case SOCKNAL_RX_PARSE_WAIT:
conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;
- cfs_spin_unlock_bh (&sched->kss_lock);
- ksocknal_conn_decref(conn);
- return (0);
+ spin_unlock_bh(&sched->kss_lock);
+ ksocknal_conn_decref(conn);
+ return 0;
}
static inline int
ksocknal_sched_cansleep(ksock_sched_t *sched)
{
- int rc;
+ int rc;
- cfs_spin_lock_bh (&sched->kss_lock);
+ spin_lock_bh(&sched->kss_lock);
- rc = (!ksocknal_data.ksnd_shuttingdown &&
- cfs_list_empty(&sched->kss_rx_conns) &&
- cfs_list_empty(&sched->kss_tx_conns));
+ rc = (!ksocknal_data.ksnd_shuttingdown &&
+ cfs_list_empty(&sched->kss_rx_conns) &&
+ cfs_list_empty(&sched->kss_tx_conns));
- cfs_spin_unlock_bh (&sched->kss_lock);
- return (rc);
+ spin_unlock_bh(&sched->kss_lock);
+ return rc;
}
int ksocknal_scheduler(void *arg)
name, info->ksi_cpt, rc);
}
- cfs_spin_lock_bh (&sched->kss_lock);
+ spin_lock_bh(&sched->kss_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
int did_something = 0;
* data_ready can set it any time after we release
* kss_lock. */
conn->ksnc_rx_ready = 0;
- cfs_spin_unlock_bh (&sched->kss_lock);
+ spin_unlock_bh(&sched->kss_lock);
- rc = ksocknal_process_receive(conn);
+ rc = ksocknal_process_receive(conn);
- cfs_spin_lock_bh (&sched->kss_lock);
+ spin_lock_bh(&sched->kss_lock);
/* I'm the only one that can clear this flag */
LASSERT(conn->ksnc_rx_scheduled);
* write_space can set it any time after we release
* kss_lock. */
conn->ksnc_tx_ready = 0;
- cfs_spin_unlock_bh (&sched->kss_lock);
+ spin_unlock_bh(&sched->kss_lock);
if (!cfs_list_empty(&zlist)) {
/* free zombie noop txs, it's fast because
if (rc == -ENOMEM || rc == -EAGAIN) {
/* Incomplete send: replace tx on HEAD of tx_queue */
- cfs_spin_lock_bh (&sched->kss_lock);
- cfs_list_add (&tx->tx_list,
- &conn->ksnc_tx_queue);
- } else {
- /* Complete send; tx -ref */
- ksocknal_tx_decref (tx);
-
- cfs_spin_lock_bh (&sched->kss_lock);
+ spin_lock_bh(&sched->kss_lock);
+ cfs_list_add(&tx->tx_list,
+ &conn->ksnc_tx_queue);
+ } else {
+ /* Complete send; tx -ref */
+ ksocknal_tx_decref(tx);
+
+ spin_lock_bh(&sched->kss_lock);
/* assume space for more */
conn->ksnc_tx_ready = 1;
}
}
if (!did_something || /* nothing to do */
++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
- cfs_spin_unlock_bh (&sched->kss_lock);
+ spin_unlock_bh(&sched->kss_lock);
nloops = 0;
cfs_cond_resched();
}
- cfs_spin_lock_bh (&sched->kss_lock);
- }
- }
+ spin_lock_bh(&sched->kss_lock);
+ }
+ }
- cfs_spin_unlock_bh (&sched->kss_lock);
- ksocknal_thread_fini ();
- return (0);
+ spin_unlock_bh(&sched->kss_lock);
+ ksocknal_thread_fini();
+ return 0;
}
/*
*/
void ksocknal_read_callback (ksock_conn_t *conn)
{
- ksock_sched_t *sched;
- ENTRY;
+ ksock_sched_t *sched;
+ ENTRY;
- sched = conn->ksnc_scheduler;
+ sched = conn->ksnc_scheduler;
- cfs_spin_lock_bh (&sched->kss_lock);
+ spin_lock_bh(&sched->kss_lock);
conn->ksnc_rx_ready = 1;
cfs_waitq_signal (&sched->kss_waitq);
}
- cfs_spin_unlock_bh (&sched->kss_lock);
+ spin_unlock_bh(&sched->kss_lock);
- EXIT;
+ EXIT;
}
/*
*/
void ksocknal_write_callback (ksock_conn_t *conn)
{
- ksock_sched_t *sched;
- ENTRY;
+ ksock_sched_t *sched;
+ ENTRY;
- sched = conn->ksnc_scheduler;
+ sched = conn->ksnc_scheduler;
- cfs_spin_lock_bh (&sched->kss_lock);
+ spin_lock_bh(&sched->kss_lock);
conn->ksnc_tx_ready = 1;
cfs_waitq_signal (&sched->kss_waitq);
}
- cfs_spin_unlock_bh (&sched->kss_lock);
+ spin_unlock_bh(&sched->kss_lock);
- EXIT;
+ EXIT;
}
ksock_proto_t *
deadline = cfs_time_add(cfs_time_current(),
cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
LASSERT (route->ksnr_scheduled);
LASSERT (!route->ksnr_connecting);
type = SOCKLND_CONN_BULK_OUT;
}
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
if (cfs_time_aftereq(cfs_time_current(), deadline)) {
rc = -ETIMEDOUT;
CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
libcfs_nid2str(peer->ksnp_id.nid));
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
}
route->ksnr_scheduled = 0;
ksocknal_launch_connection_locked(route);
}
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
return retry_later;
failed:
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
route->ksnr_scheduled = 0;
route->ksnr_connecting = 0;
cfs_list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
}
#endif
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_peer_failed(peer);
ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1);
ksocknal_data.ksnd_connd_starting_stamp = sec;
ksocknal_data.ksnd_connd_starting++;
- cfs_spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
+ spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
- /* NB: total is the next id */
- rc = ksocknal_thread_start(ksocknal_connd, (void *)((long)total));
+ /* NB: total is the next id */
+ rc = ksocknal_thread_start(ksocknal_connd, (void *)((long)total));
- cfs_spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
+ spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
if (rc == 0)
return 1;
int
ksocknal_connd (void *arg)
{
- cfs_spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
+ spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
long id = (long)(long_ptr_t)arg;
char name[16];
ksock_connreq_t *cr;
cfs_waitlink_init (&wait);
- cfs_spin_lock_bh (connd_lock);
+ spin_lock_bh(connd_lock);
LASSERT(ksocknal_data.ksnd_connd_starting > 0);
ksocknal_data.ksnd_connd_starting--;
next, ksock_connreq_t, ksncr_list);
cfs_list_del(&cr->ksncr_list);
- cfs_spin_unlock_bh(connd_lock);
- dropped_lock = 1;
+ spin_unlock_bh(connd_lock);
+ dropped_lock = 1;
- ksocknal_create_conn(cr->ksncr_ni, NULL,
- cr->ksncr_sock, SOCKLND_CONN_NONE);
- lnet_ni_decref(cr->ksncr_ni);
- LIBCFS_FREE(cr, sizeof(*cr));
+ ksocknal_create_conn(cr->ksncr_ni, NULL,
+ cr->ksncr_sock, SOCKLND_CONN_NONE);
+ lnet_ni_decref(cr->ksncr_ni);
+ LIBCFS_FREE(cr, sizeof(*cr));
- cfs_spin_lock_bh(connd_lock);
+ spin_lock_bh(connd_lock);
}
/* Only handle an outgoing connection request if there
if (route != NULL) {
cfs_list_del (&route->ksnr_connd_list);
ksocknal_data.ksnd_connd_connecting++;
- cfs_spin_unlock_bh(connd_lock);
+ spin_unlock_bh(connd_lock);
dropped_lock = 1;
if (ksocknal_connect(route)) {
ksocknal_route_decref(route);
- cfs_spin_lock_bh(connd_lock);
- ksocknal_data.ksnd_connd_connecting--;
- }
-
- if (dropped_lock) {
- if (++nloops < SOCKNAL_RESCHED)
- continue;
- cfs_spin_unlock_bh(connd_lock);
- nloops = 0;
- cfs_cond_resched();
- cfs_spin_lock_bh(connd_lock);
- continue;
- }
-
- /* Nothing to do for 'timeout' */
- cfs_set_current_state (CFS_TASK_INTERRUPTIBLE);
- cfs_waitq_add_exclusive (&ksocknal_data.ksnd_connd_waitq,
- &wait);
- cfs_spin_unlock_bh(connd_lock);
-
- nloops = 0;
- cfs_waitq_timedwait (&wait, CFS_TASK_INTERRUPTIBLE, timeout);
-
- cfs_set_current_state (CFS_TASK_RUNNING);
- cfs_waitq_del (&ksocknal_data.ksnd_connd_waitq, &wait);
- cfs_spin_lock_bh(connd_lock);
- }
- ksocknal_data.ksnd_connd_running--;
- cfs_spin_unlock_bh(connd_lock);
+ spin_lock_bh(connd_lock);
+ ksocknal_data.ksnd_connd_connecting--;
+ }
+
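+ /* NB: dropped_lock means we did some work above; loop again but
+ * yield the CPU every SOCKNAL_RESCHED iterations */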
+ if (dropped_lock) {
+ if (++nloops < SOCKNAL_RESCHED)
+ continue;
+ spin_unlock_bh(connd_lock);
+ nloops = 0;
+ cfs_cond_resched();
+ spin_lock_bh(connd_lock);
+ continue;
+ }
+
+ /* Nothing to do for 'timeout' */
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+ cfs_waitq_add_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
+ spin_unlock_bh(connd_lock);
+
+ nloops = 0;
+ cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
+
+ cfs_set_current_state(CFS_TASK_RUNNING);
+ cfs_waitq_del(&ksocknal_data.ksnd_connd_waitq, &wait);
+ spin_lock_bh(connd_lock);
+ }
+ ksocknal_data.ksnd_connd_running--;
+ spin_unlock_bh(connd_lock);
- ksocknal_thread_fini ();
- return (0);
+ ksocknal_thread_fini();
+ return 0;
}
ksock_conn_t *
ksock_tx_t *tx;
CFS_LIST_HEAD (stale_txs);
- cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+ write_lock_bh(&ksocknal_data.ksnd_global_lock);
while (!cfs_list_empty (&peer->ksnp_tx_queue)) {
tx = cfs_list_entry (peer->ksnp_tx_queue.next,
cfs_list_add_tail (&tx->tx_list, &stale_txs);
}
- cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+ write_unlock_bh(&ksocknal_data.ksnd_global_lock);
ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1);
}
if (conn != NULL) {
sched = conn->ksnc_scheduler;
- cfs_spin_lock_bh (&sched->kss_lock);
- if (!cfs_list_empty(&conn->ksnc_tx_queue)) {
- cfs_spin_unlock_bh(&sched->kss_lock);
- /* there is an queued ACK, don't need keepalive */
- return 0;
- }
+ spin_lock_bh(&sched->kss_lock);
+ if (!cfs_list_empty(&conn->ksnc_tx_queue)) {
+ spin_unlock_bh(&sched->kss_lock);
+ /* there is a queued ACK, no need for keepalive */
+ return 0;
+ }
- cfs_spin_unlock_bh(&sched->kss_lock);
- }
+ spin_unlock_bh(&sched->kss_lock);
+ }
- cfs_read_unlock(&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
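+ /* NB: ksnd_global_lock is dropped here; every return path below
+ * re-takes it as reader on behalf of the caller */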
- /* cookie = 1 is reserved for keepalive PING */
- tx = ksocknal_alloc_tx_noop(1, 1);
- if (tx == NULL) {
- cfs_read_lock(&ksocknal_data.ksnd_global_lock);
- return -ENOMEM;
- }
+ /* cookie = 1 is reserved for keepalive PING */
+ tx = ksocknal_alloc_tx_noop(1, 1);
+ if (tx == NULL) {
+ read_lock(&ksocknal_data.ksnd_global_lock);
+ return -ENOMEM;
+ }
- if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
- cfs_read_lock(&ksocknal_data.ksnd_global_lock);
- return 1;
- }
+ if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
+ read_lock(&ksocknal_data.ksnd_global_lock);
+ return 1;
+ }
- ksocknal_free_tx(tx);
- cfs_read_lock(&ksocknal_data.ksnd_global_lock);
+ ksocknal_free_tx(tx);
+ read_lock(&ksocknal_data.ksnd_global_lock);
- return -EIO;
+ return -EIO;
}
/* NB. We expect to have a look at all the peers and not find any
* connections to time out, so we just use a shared lock while we
* take a look... */
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
cfs_list_for_each_entry_typed(peer, peers, ksock_peer_t, ksnp_list) {
cfs_time_t deadline = 0;
int n = 0;
if (ksocknal_send_keepalive_locked(peer) != 0) {
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
goto again;
}
conn = ksocknal_find_timed_out_conn (peer);
if (conn != NULL) {
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
tx->tx_deadline)) {
ksocknal_peer_addref(peer);
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
ksocknal_flush_stale_txs(peer);
if (cfs_list_empty(&peer->ksnp_zc_req_list))
continue;
- cfs_spin_lock(&peer->ksnp_lock);
+ spin_lock(&peer->ksnp_lock);
cfs_list_for_each_entry_typed(tx, &peer->ksnp_zc_req_list,
ksock_tx_t, tx_zc_list) {
if (!cfs_time_aftereq(cfs_time_current(),
}
if (n == 0) {
- cfs_spin_unlock(&peer->ksnp_lock);
+ spin_unlock(&peer->ksnp_lock);
continue;
}
conn = tx->tx_conn;
ksocknal_conn_addref(conn);
- cfs_spin_unlock(&peer->ksnp_lock);
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ spin_unlock(&peer->ksnp_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
CERROR("Total %d stale ZC_REQs for peer %s detected; the "
"oldest(%p) timed out %ld secs ago, "
goto again;
}
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
}
int
CFS_INIT_LIST_HEAD(&enomem_conns);
cfs_waitlink_init (&wait);
- cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
ksock_conn_t, ksnc_list);
cfs_list_del (&conn->ksnc_list);
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
- ksocknal_terminate_conn (conn);
- ksocknal_conn_decref(conn);
+ ksocknal_terminate_conn(conn);
+ ksocknal_conn_decref(conn);
- cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
continue;
}
next, ksock_conn_t, ksnc_list);
cfs_list_del (&conn->ksnc_list);
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
- ksocknal_destroy_conn (conn);
+ ksocknal_destroy_conn(conn);
- cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
continue;
}
cfs_list_del_init(&ksocknal_data.ksnd_enomem_conns);
}
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
/* reschedule all the connections that stalled with ENOMEM... */
nenomem_conns = 0;
sched = conn->ksnc_scheduler;
- cfs_spin_lock_bh (&sched->kss_lock);
+ spin_lock_bh(&sched->kss_lock);
- LASSERT (conn->ksnc_tx_scheduled);
- conn->ksnc_tx_ready = 1;
- cfs_list_add_tail(&conn->ksnc_tx_list,
- &sched->kss_tx_conns);
- cfs_waitq_signal (&sched->kss_waitq);
+ LASSERT(conn->ksnc_tx_scheduled);
+ conn->ksnc_tx_ready = 1;
+ cfs_list_add_tail(&conn->ksnc_tx_list,
+ &sched->kss_tx_conns);
+ cfs_waitq_signal(&sched->kss_waitq);
- cfs_spin_unlock_bh (&sched->kss_lock);
+ spin_unlock_bh(&sched->kss_lock);
nenomem_conns++;
}
cfs_set_current_state (CFS_TASK_RUNNING);
cfs_waitq_del (&ksocknal_data.ksnd_reaper_waitq, &wait);
- cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
- }
+ spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
+ }
- cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+ spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
- ksocknal_thread_fini ();
- return (0);
+ ksocknal_thread_fini();
+ return 0;
}
/* interleave correctly with closing sockets... */
LASSERT(!in_irq());
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
conn = sk->sk_user_data;
if (conn == NULL) { /* raced with ksocknal_terminate_conn */
} else
ksocknal_read_callback(conn);
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
EXIT;
}
/* interleave correctly with closing sockets... */
LASSERT(!in_irq());
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
conn = sk->sk_user_data;
wspace = SOCKNAL_WSPACE(sk);
LASSERT (sk->sk_write_space != &ksocknal_write_space);
sk->sk_write_space (sk);
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
return;
}
clear_bit (SOCK_NOSPACE, &sk->sk_socket->flags);
}
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
}
void
int
ksocknal_lib_memory_pressure(ksock_conn_t *conn)
{
- int rc = 0;
- ksock_sched_t *sched;
-
- sched = conn->ksnc_scheduler;
- cfs_spin_lock_bh (&sched->kss_lock);
+ int rc = 0;
+ ksock_sched_t *sched;
+
+ sched = conn->ksnc_scheduler;
+ spin_lock_bh(&sched->kss_lock);
if (!SOCK_TEST_NOSPACE(conn->ksnc_sock) &&
!conn->ksnc_tx_ready) {
rc = -ENOMEM;
}
- cfs_spin_unlock_bh (&sched->kss_lock);
+ spin_unlock_bh(&sched->kss_lock);
- return rc;
+ return rc;
}
ks_get_tconn(tconn);
- cfs_spin_lock(&tconn->kstc_lock);
+ spin_lock(&tconn->kstc_lock);
if (tconn->kstc_type == kstt_sender) {
nagle = tconn->sender.kstc_info.nagle;
tconn->sender.kstc_info.nagle = 0;
tconn->child.kstc_info.nagle = 0;
}
- cfs_spin_unlock(&tconn->kstc_lock);
+ spin_unlock(&tconn->kstc_lock);
val = 1;
rc = ks_set_tcp_option(
);
LASSERT (rc == 0);
- cfs_spin_lock(&tconn->kstc_lock);
-
- if (tconn->kstc_type == kstt_sender) {
- tconn->sender.kstc_info.nagle = nagle;
- } else {
- LASSERT(tconn->kstc_type == kstt_child);
- tconn->child.kstc_info.nagle = nagle;
- }
- cfs_spin_unlock(&tconn->kstc_lock);
+ spin_lock(&tconn->kstc_lock);
+
+ if (tconn->kstc_type == kstt_sender) {
+ tconn->sender.kstc_info.nagle = nagle;
+ } else {
+ LASSERT(tconn->kstc_type == kstt_child);
+ tconn->child.kstc_info.nagle = nagle;
+ }
+ spin_unlock(&tconn->kstc_lock);
ks_put_tconn(tconn);
}
{
ksock_conn_t * conn = (ksock_conn_t *) sock->kstc_conn;
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
if (mode) {
ksocknal_write_callback(conn);
} else {
ksocknal_read_callback(conn);
}
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
}
void
ksocknal_tx_fini_callback(ksock_conn_t * conn, ksock_tx_t * tx)
{
- /* remove tx/conn from conn's outgoing queue */
- cfs_spin_lock_bh (&conn->ksnc_scheduler->kss_lock);
- cfs_list_del(&tx->tx_list);
- if (cfs_list_empty(&conn->ksnc_tx_queue)) {
- cfs_list_del (&conn->ksnc_tx_list);
- }
- cfs_spin_unlock_bh (&conn->ksnc_scheduler->kss_lock);
+ /* remove tx/conn from conn's outgoing queue */
+ spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
+ cfs_list_del(&tx->tx_list);
+ if (cfs_list_empty(&conn->ksnc_tx_queue))
+ cfs_list_del(&conn->ksnc_tx_list);
+
+ spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
- /* complete send; tx -ref */
- ksocknal_tx_decref (tx);
+ /* complete send; tx -ref */
+ ksocknal_tx_decref(tx);
}
void
static int
ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
{
- ksock_peer_t *peer = c->ksnc_peer;
- ksock_conn_t *conn;
- ksock_tx_t *tx;
- int rc;
+ ksock_peer_t *peer = c->ksnc_peer;
+ ksock_conn_t *conn;
+ ksock_tx_t *tx;
+ int rc;
- cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+ read_lock(&ksocknal_data.ksnd_global_lock);
- conn = ksocknal_find_conn_locked(peer, NULL, !!remote);
- if (conn != NULL) {
- ksock_sched_t *sched = conn->ksnc_scheduler;
+ conn = ksocknal_find_conn_locked(peer, NULL, !!remote);
+ if (conn != NULL) {
+ ksock_sched_t *sched = conn->ksnc_scheduler;
- LASSERT (conn->ksnc_proto->pro_queue_tx_zcack != NULL);
+ LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);
- cfs_spin_lock_bh (&sched->kss_lock);
+ spin_lock_bh(&sched->kss_lock);
- rc = conn->ksnc_proto->pro_queue_tx_zcack(conn, NULL, cookie);
+ rc = conn->ksnc_proto->pro_queue_tx_zcack(conn, NULL, cookie);
- cfs_spin_unlock_bh (&sched->kss_lock);
+ spin_unlock_bh(&sched->kss_lock);
- if (rc) { /* piggybacked */
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
- return 0;
- }
- }
+ if (rc) { /* piggybacked */
+ read_unlock(&ksocknal_data.ksnd_global_lock);
+ return 0;
+ }
+ }
- cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
/* ACK connection is not ready, or can't piggyback the ACK */
tx = ksocknal_alloc_tx_noop(cookie, !!remote);
return count == 1 ? 0 : -EPROTO;
}
- cfs_spin_lock(&peer->ksnp_lock);
+ spin_lock(&peer->ksnp_lock);
cfs_list_for_each_entry_safe(tx, tmp,
&peer->ksnp_zc_req_list, tx_zc_list) {
}
}
- cfs_spin_unlock(&peer->ksnp_lock);
+ spin_unlock(&peer->ksnp_lock);
while (!cfs_list_empty(&zlist)) {
tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_zc_list);
int pta_shutdown;
cfs_socket_t *pta_sock;
#ifdef __KERNEL__
- cfs_completion_t pta_signal;
+ struct completion pta_signal;
#else
- cfs_mt_completion_t pta_signal;
+ mt_completion_t pta_signal;
#endif
} lnet_acceptor_state;
#ifdef __KERNEL__
-#define cfs_mt_init_completion(c) cfs_init_completion(c)
-#define cfs_mt_wait_for_completion(c) cfs_wait_for_completion(c)
-#define cfs_mt_complete(c) cfs_complete(c)
-#define cfs_mt_fini_completion(c) cfs_fini_completion(c)
+#define mt_init_completion(c) init_completion(c)
+#define mt_wait_for_completion(c) wait_for_completion(c)
+#define mt_complete(c) complete(c)
+#define mt_fini_completion(c) fini_completion(c)
EXPORT_SYMBOL(lnet_acceptor_port);
/* set init status and unblock parent */
lnet_acceptor_state.pta_shutdown = rc;
- cfs_mt_complete(&lnet_acceptor_state.pta_signal);
+ mt_complete(&lnet_acceptor_state.pta_signal);
if (rc != 0)
return rc;
CDEBUG(D_NET, "Acceptor stopping\n");
/* unblock lnet_acceptor_stop() */
- cfs_mt_complete(&lnet_acceptor_state.pta_signal);
+ mt_complete(&lnet_acceptor_state.pta_signal);
return 0;
}
return 0;
#endif
- cfs_mt_init_completion(&lnet_acceptor_state.pta_signal);
+ mt_init_completion(&lnet_acceptor_state.pta_signal);
rc = accept2secure(accept_type, &secure);
if (rc <= 0) {
- cfs_mt_fini_completion(&lnet_acceptor_state.pta_signal);
+ mt_fini_completion(&lnet_acceptor_state.pta_signal);
return rc;
}
rc2 = cfs_create_thread(lnet_acceptor, (void *)(ulong_ptr_t)secure, 0);
if (rc2 < 0) {
CERROR("Can't start acceptor thread: %d\n", rc);
- cfs_mt_fini_completion(&lnet_acceptor_state.pta_signal);
+ mt_fini_completion(&lnet_acceptor_state.pta_signal);
return -ESRCH;
}
/* wait for acceptor to startup */
- cfs_mt_wait_for_completion(&lnet_acceptor_state.pta_signal);
+ mt_wait_for_completion(&lnet_acceptor_state.pta_signal);
if (!lnet_acceptor_state.pta_shutdown) {
/* started OK */
}
LASSERT (lnet_acceptor_state.pta_sock == NULL);
- cfs_mt_fini_completion(&lnet_acceptor_state.pta_signal);
+ mt_fini_completion(&lnet_acceptor_state.pta_signal);
return -ENETDOWN;
}
libcfs_sock_abort_accept(lnet_acceptor_state.pta_sock);
/* block until acceptor signals exit */
- cfs_mt_wait_for_completion(&lnet_acceptor_state.pta_signal);
+ mt_wait_for_completion(&lnet_acceptor_state.pta_signal);
- cfs_mt_fini_completion(&lnet_acceptor_state.pta_signal);
+ mt_fini_completion(&lnet_acceptor_state.pta_signal);
}
#else /* single-threaded user-space */
void
lnet_init_locks(void)
{
- cfs_spin_lock_init(&the_lnet.ln_eq_wait_lock);
+ spin_lock_init(&the_lnet.ln_eq_wait_lock);
cfs_waitq_init(&the_lnet.ln_eq_waitq);
- cfs_mutex_init(&the_lnet.ln_lnd_mutex);
- cfs_mutex_init(&the_lnet.ln_api_mutex);
+ mutex_init(&the_lnet.ln_lnd_mutex);
+ mutex_init(&the_lnet.ln_api_mutex);
}
void
}
#ifdef __KERNEL__
- cfs_spin_lock_init(&ni->ni_lock);
+ spin_lock_init(&ni->ni_lock);
#else
# ifdef HAVE_LIBPTHREAD
pthread_mutex_init(&ni->ni_lock, NULL);
CFS_INIT_LIST_HEAD(&ptl->ptl_msg_delayed);
CFS_INIT_LIST_HEAD(&ptl->ptl_msg_stealing);
#ifdef __KERNEL__
- cfs_spin_lock_init(&ptl->ptl_lock);
+ spin_lock_init(&ptl->ptl_lock);
#else
# ifdef HAVE_LIBPTHREAD
pthread_mutex_init(&ptl->ptl_lock, NULL);
CFS_MODULE_PARM(config_on_load, "i", int, 0444,
"configure network at module load");
-static cfs_mutex_t lnet_config_mutex;
+static struct mutex lnet_config_mutex;
int
lnet_configure (void *arg)
int rc;
ENTRY;
- cfs_mutex_init(&lnet_config_mutex);
+ mutex_init(&lnet_config_mutex);
rc = LNetInit();
if (rc != 0) {
return 0;
#ifdef __KERNEL__
- cfs_sema_init(&the_lnet.ln_rc_signal, 0);
+ sema_init(&the_lnet.ln_rc_signal, 0);
/* EQ size doesn't matter; the callback is guaranteed to get every
* event */
eqsz = 0;
if (rc < 0) {
CERROR("Can't start router checker thread: %d\n", rc);
/* block until event callback signals exit */
- cfs_down(&the_lnet.ln_rc_signal);
+ down(&the_lnet.ln_rc_signal);
rc = LNetEQFree(the_lnet.ln_rc_eqh);
LASSERT (rc == 0);
the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
#ifdef __KERNEL__
/* block until event callback signals exit */
- cfs_down(&the_lnet.ln_rc_signal);
+ down(&the_lnet.ln_rc_signal);
#else
lnet_router_checker();
#endif
lnet_prune_rc_data(1); /* wait for UNLINK */
the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
- cfs_up(&the_lnet.ln_rc_signal);
+ up(&the_lnet.ln_rc_signal);
/* The unlink event callback will signal final completion */
return 0;
}
return -EFAULT;
}
- cfs_mutex_lock(&console_session.ses_mutex);
+ mutex_lock(&console_session.ses_mutex);
console_session.ses_laststamp = cfs_time_current_sec();
sizeof(lstcon_trans_stat_t)))
rc = -EFAULT;
out:
- cfs_mutex_unlock(&console_session.ses_mutex);
+ mutex_unlock(&console_session.ses_mutex);
LIBCFS_FREE(buf, data->ioc_plen1);
static void
lstcon_rpc_done(srpc_client_rpc_t *rpc)
{
- lstcon_rpc_t *crpc = (lstcon_rpc_t *)rpc->crpc_priv;
+ lstcon_rpc_t *crpc = (lstcon_rpc_t *)rpc->crpc_priv;
- LASSERT (crpc != NULL && rpc == crpc->crp_rpc);
- LASSERT (crpc->crp_posted && !crpc->crp_finished);
+ LASSERT(crpc != NULL && rpc == crpc->crp_rpc);
+ LASSERT(crpc->crp_posted && !crpc->crp_finished);
- cfs_spin_lock(&rpc->crpc_lock);
+ spin_lock(&rpc->crpc_lock);
- if (crpc->crp_trans == NULL) {
- /* Orphan RPC is not in any transaction,
- * I'm just a poor body and nobody loves me */
- cfs_spin_unlock(&rpc->crpc_lock);
+ if (crpc->crp_trans == NULL) {
+ /* Orphan RPC is not in any transaction,
+ * I'm just a poor body and nobody loves me */
+ spin_unlock(&rpc->crpc_lock);
/* release it */
lstcon_rpc_put(crpc);
if (cfs_atomic_dec_and_test(&crpc->crp_trans->tas_remaining))
cfs_waitq_signal(&crpc->crp_trans->tas_waitq);
- cfs_spin_unlock(&rpc->crpc_lock);
+ spin_unlock(&rpc->crpc_lock);
}
int
lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats,
int bulk_npg, int bulk_len, lstcon_rpc_t **crpcpp)
{
- lstcon_rpc_t *crpc = NULL;
- int rc;
+ lstcon_rpc_t *crpc = NULL;
+ int rc;
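+ /* NB: reuse an idle console RPC from ses_rpc_freelist when possible;
+ * allocate a new one only if the freelist is empty */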
- cfs_spin_lock(&console_session.ses_rpc_lock);
+ spin_lock(&console_session.ses_rpc_lock);
- if (!cfs_list_empty(&console_session.ses_rpc_freelist)) {
- crpc = cfs_list_entry(console_session.ses_rpc_freelist.next,
- lstcon_rpc_t, crp_link);
- cfs_list_del_init(&crpc->crp_link);
- }
+ if (!cfs_list_empty(&console_session.ses_rpc_freelist)) {
+ crpc = cfs_list_entry(console_session.ses_rpc_freelist.next,
+ lstcon_rpc_t, crp_link);
+ cfs_list_del_init(&crpc->crp_link);
+ }
- cfs_spin_unlock(&console_session.ses_rpc_lock);
+ spin_unlock(&console_session.ses_rpc_lock);
- if (crpc == NULL) {
- LIBCFS_ALLOC(crpc, sizeof(*crpc));
- if (crpc == NULL)
- return -ENOMEM;
- }
+ if (crpc == NULL) {
+ LIBCFS_ALLOC(crpc, sizeof(*crpc));
+ if (crpc == NULL)
+ return -ENOMEM;
+ }
- rc = lstcon_rpc_init(nd, service, feats,
- bulk_npg, bulk_len, 0, crpc);
+ rc = lstcon_rpc_init(nd, service, feats, bulk_npg, bulk_len, 0, crpc);
if (rc == 0) {
*crpcpp = crpc;
return 0;
memset(crpc, 0, sizeof(*crpc));
crpc->crp_embedded = 1;
- } else {
- cfs_spin_lock(&console_session.ses_rpc_lock);
+ } else {
+ spin_lock(&console_session.ses_rpc_lock);
- cfs_list_add(&crpc->crp_link,
- &console_session.ses_rpc_freelist);
+ cfs_list_add(&crpc->crp_link,
+ &console_session.ses_rpc_freelist);
- cfs_spin_unlock(&console_session.ses_rpc_lock);
- }
+ spin_unlock(&console_session.ses_rpc_lock);
+ }
- /* RPC is not alive now */
- cfs_atomic_dec(&console_session.ses_rpc_counter);
+ /* RPC is not alive now */
+ cfs_atomic_dec(&console_session.ses_rpc_counter);
}
void
cfs_atomic_set(&trans->tas_remaining, 0);
cfs_waitq_init(&trans->tas_waitq);
- cfs_spin_lock(&console_session.ses_rpc_lock);
+ spin_lock(&console_session.ses_rpc_lock);
trans->tas_features = console_session.ses_features;
- cfs_spin_unlock(&console_session.ses_rpc_lock);
+ spin_unlock(&console_session.ses_rpc_lock);
*transpp = trans;
return 0;
lstcon_rpc_t, crp_link) {
rpc = crpc->crp_rpc;
- cfs_spin_lock(&rpc->crpc_lock);
+ spin_lock(&rpc->crpc_lock);
- if (!crpc->crp_posted || /* not posted */
- crpc->crp_stamp != 0) { /* rpc done or aborted already */
- if (crpc->crp_stamp == 0) {
- crpc->crp_stamp = cfs_time_current();
- crpc->crp_status = -EINTR;
- }
- cfs_spin_unlock(&rpc->crpc_lock);
- continue;
- }
+ if (!crpc->crp_posted || /* not posted */
+ crpc->crp_stamp != 0) { /* rpc done or aborted already */
+ if (crpc->crp_stamp == 0) {
+ crpc->crp_stamp = cfs_time_current();
+ crpc->crp_status = -EINTR;
+ }
+ spin_unlock(&rpc->crpc_lock);
+ continue;
+ }
- crpc->crp_stamp = cfs_time_current();
- crpc->crp_status = error;
+ crpc->crp_stamp = cfs_time_current();
+ crpc->crp_status = error;
- cfs_spin_unlock(&rpc->crpc_lock);
+ spin_unlock(&rpc->crpc_lock);
sfw_abort_rpc(rpc);
lstcon_rpc_post(crpc);
}
- cfs_mutex_unlock(&console_session.ses_mutex);
+ mutex_unlock(&console_session.ses_mutex);
cfs_waitq_wait_event_interruptible_timeout(trans->tas_waitq,
lstcon_rpc_trans_check(trans),
rc = (rc > 0)? 0: ((rc < 0)? -EINTR: -ETIMEDOUT);
- cfs_mutex_lock(&console_session.ses_mutex);
+ mutex_lock(&console_session.ses_mutex);
if (console_session.ses_shutdown)
rc = -ESHUTDOWN;
lstcon_rpc_t, crp_link) {
rpc = crpc->crp_rpc;
- cfs_spin_lock(&rpc->crpc_lock);
+ spin_lock(&rpc->crpc_lock);
- /* free it if not posted or finished already */
- if (!crpc->crp_posted || crpc->crp_finished) {
- cfs_spin_unlock(&rpc->crpc_lock);
+ /* free it if not posted or finished already */
+ if (!crpc->crp_posted || crpc->crp_finished) {
+ spin_unlock(&rpc->crpc_lock);
cfs_list_del_init(&crpc->crp_link);
lstcon_rpc_put(crpc);
cfs_list_del_init(&crpc->crp_link);
count ++;
- cfs_spin_unlock(&rpc->crpc_lock);
+ spin_unlock(&rpc->crpc_lock);
cfs_atomic_dec(&trans->tas_remaining);
}
/* RPC pinger is a special case of transaction,
* it's called by timer at 8 seconds interval.
*/
- cfs_mutex_lock(&console_session.ses_mutex);
+ mutex_lock(&console_session.ses_mutex);
if (console_session.ses_shutdown || console_session.ses_expired) {
- cfs_mutex_unlock(&console_session.ses_mutex);
+ mutex_unlock(&console_session.ses_mutex);
return;
}
LASSERT (crpc->crp_trans == trans);
LASSERT (!cfs_list_empty(&crpc->crp_link));
- cfs_spin_lock(&crpc->crp_rpc->crpc_lock);
+ spin_lock(&crpc->crp_rpc->crpc_lock);
- LASSERT (crpc->crp_posted);
+ LASSERT(crpc->crp_posted);
- if (!crpc->crp_finished) {
- /* in flight */
- cfs_spin_unlock(&crpc->crp_rpc->crpc_lock);
- continue;
- }
+ if (!crpc->crp_finished) {
+ /* in flight */
+ spin_unlock(&crpc->crp_rpc->crpc_lock);
+ continue;
+ }
- cfs_spin_unlock(&crpc->crp_rpc->crpc_lock);
+ spin_unlock(&crpc->crp_rpc->crpc_lock);
lstcon_rpc_get_reply(crpc, &rep);
}
if (console_session.ses_expired) {
- cfs_mutex_unlock(&console_session.ses_mutex);
+ mutex_unlock(&console_session.ses_mutex);
return;
}
ptimer->stt_expires = (cfs_time_t)(cfs_time_current_sec() + LST_PING_INTERVAL);
stt_add_timer(ptimer);
- cfs_mutex_unlock(&console_session.ses_mutex);
+ mutex_unlock(&console_session.ses_mutex);
}
int
cfs_waitq_signal(&trans->tas_waitq);
}
- cfs_mutex_unlock(&console_session.ses_mutex);
+ mutex_unlock(&console_session.ses_mutex);
- CWARN("Session is shutting down, "
- "waiting for termination of transactions\n");
- cfs_pause(cfs_time_seconds(1));
+ CWARN("Session is shutting down, "
+ "waiting for termination of transactions\n");
+ cfs_pause(cfs_time_seconds(1));
- cfs_mutex_lock(&console_session.ses_mutex);
+ mutex_lock(&console_session.ses_mutex);
}
- cfs_spin_lock(&console_session.ses_rpc_lock);
+ spin_lock(&console_session.ses_rpc_lock);
lst_wait_until((cfs_atomic_read(&console_session.ses_rpc_counter) == 0),
console_session.ses_rpc_lock,
cfs_list_add(&zlist, &console_session.ses_rpc_freelist);
cfs_list_del_init(&console_session.ses_rpc_freelist);
- cfs_spin_unlock(&console_session.ses_rpc_lock);
+ spin_unlock(&console_session.ses_rpc_lock);
while (!cfs_list_empty(&zlist)) {
crpc = cfs_list_entry(zlist.next, lstcon_rpc_t, crp_link);
console_session.ses_ping = NULL;
- cfs_spin_lock_init(&console_session.ses_rpc_lock);
- cfs_atomic_set(&console_session.ses_rpc_counter, 0);
- CFS_INIT_LIST_HEAD(&console_session.ses_rpc_freelist);
+ spin_lock_init(&console_session.ses_rpc_lock);
+ cfs_atomic_set(&console_session.ses_rpc_counter, 0);
+ CFS_INIT_LIST_HEAD(&console_session.ses_rpc_freelist);
- return 0;
+ return 0;
}
void
return -EPROTO;
}
- cfs_spin_lock(&console_session.ses_rpc_lock);
+ spin_lock(&console_session.ses_rpc_lock);
if (!console_session.ses_feats_updated) {
console_session.ses_feats_updated = 1;
if (console_session.ses_features != feats)
rc = -EPROTO;
- cfs_spin_unlock(&console_session.ses_rpc_lock);
+ spin_unlock(&console_session.ses_rpc_lock);
if (rc != 0) {
CERROR("remote features %x do not match with "
sfw_unpack_message(req);
- cfs_mutex_lock(&console_session.ses_mutex);
+ mutex_lock(&console_session.ses_mutex);
jrep->join_sid = console_session.ses_id;
if (grp != NULL)
lstcon_group_put(grp);
- cfs_mutex_unlock(&console_session.ses_mutex);
+ mutex_unlock(&console_session.ses_mutex);
return rc;
}
console_session.ses_features = LST_FEATS_MASK;
console_session.ses_laststamp = cfs_time_current_sec();
- cfs_mutex_init(&console_session.ses_mutex);
+ mutex_init(&console_session.ses_mutex);
CFS_INIT_LIST_HEAD(&console_session.ses_ndl_list);
CFS_INIT_LIST_HEAD(&console_session.ses_grp_list);
libcfs_deregister_ioctl(&lstcon_ioctl_handler);
- cfs_mutex_lock(&console_session.ses_mutex);
+ mutex_lock(&console_session.ses_mutex);
srpc_shutdown_service(&lstcon_acceptor_service);
srpc_remove_service(&lstcon_acceptor_service);
lstcon_rpc_module_fini();
- cfs_mutex_unlock(&console_session.ses_mutex);
+ mutex_unlock(&console_session.ses_mutex);
LASSERT (cfs_list_empty(&console_session.ses_ndl_list));
LASSERT (cfs_list_empty(&console_session.ses_grp_list));
#define LST_CONSOLE_TIMEOUT 300 /* default console timeout */
typedef struct {
- cfs_mutex_t ses_mutex; /* lock for session, only one thread can enter session */
+ struct mutex ses_mutex; /* only one thread can enter session */
lst_sid_t ses_id; /* global session id */
int ses_key; /* local session key */
int ses_state; /* state of session */
cfs_list_t ses_ndl_list; /* global list of nodes */
cfs_list_t *ses_ndl_hash; /* hash table of nodes */
- cfs_spinlock_t ses_rpc_lock; /* serialize */
+ spinlock_t ses_rpc_lock; /* serialize */
cfs_atomic_t ses_rpc_counter;/* # of initialized RPCs */
cfs_list_t ses_rpc_freelist; /* idle console rpc */
} lstcon_session_t; /*** session descriptor */
cfs_list_t fw_zombie_sessions; /* stopping sessions */
cfs_list_t fw_tests; /* registered test cases */
cfs_atomic_t fw_nzombies; /* # zombie sessions */
- cfs_spinlock_t fw_lock; /* serialise */
- sfw_session_t *fw_session; /* _the_ session */
- int fw_shuttingdown; /* shutdown in progress */
- srpc_server_rpc_t *fw_active_srpc; /* running RPC */
+ spinlock_t fw_lock; /* serialise */
+ sfw_session_t *fw_session; /* _the_ session */
+ int fw_shuttingdown; /* shutdown in progress */
+ srpc_server_rpc_t *fw_active_srpc; /* running RPC */
} sfw_data;
/* forward ref's */
cfs_atomic_inc(&sfw_data.fw_nzombies);
cfs_list_add(&sn->sn_list, &sfw_data.fw_zombie_sessions);
- cfs_spin_unlock(&sfw_data.fw_lock);
+ spin_unlock(&sfw_data.fw_lock);
- cfs_list_for_each_entry_typed (tsc, &sfw_data.fw_tests,
- sfw_test_case_t, tsc_list) {
- srpc_abort_service(tsc->tsc_srv_service);
- }
+ cfs_list_for_each_entry_typed(tsc, &sfw_data.fw_tests,
+ sfw_test_case_t, tsc_list) {
+ srpc_abort_service(tsc->tsc_srv_service);
+ }
- cfs_spin_lock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
cfs_list_for_each_entry_typed (tsb, &sn->sn_batches,
sfw_batch_t, bat_list) {
return; /* wait for active batches to stop */
cfs_list_del_init(&sn->sn_list);
- cfs_spin_unlock(&sfw_data.fw_lock);
+ spin_unlock(&sfw_data.fw_lock);
- sfw_destroy_session(sn);
+ sfw_destroy_session(sn);
- cfs_spin_lock(&sfw_data.fw_lock);
- return;
+ spin_lock(&sfw_data.fw_lock);
}
#ifndef __KERNEL__
void
sfw_session_expired (void *data)
{
- sfw_session_t *sn = data;
+ sfw_session_t *sn = data;
- cfs_spin_lock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
LASSERT (sn->sn_timer_active);
LASSERT (sn == sfw_data.fw_session);
sn->sn_timer_active = 0;
sfw_deactivate_session();
- cfs_spin_unlock(&sfw_data.fw_lock);
- return;
+ spin_unlock(&sfw_data.fw_lock);
}
static inline void
swi_state2str(rpc->crpc_wi.swi_state),
rpc->crpc_aborted, rpc->crpc_status);
- cfs_spin_lock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
- /* my callers must finish all RPCs before shutting me down */
- LASSERT (!sfw_data.fw_shuttingdown);
- cfs_list_add(&rpc->crpc_list, &sfw_data.fw_zombie_rpcs);
+ /* my callers must finish all RPCs before shutting me down */
+ LASSERT(!sfw_data.fw_shuttingdown);
+ cfs_list_add(&rpc->crpc_list, &sfw_data.fw_zombie_rpcs);
- cfs_spin_unlock(&sfw_data.fw_lock);
- return;
+ spin_unlock(&sfw_data.fw_lock);
}
sfw_batch_t *
sfw_init_session(sn, request->mksn_sid,
msg->msg_ses_feats, &request->mksn_name[0]);
- cfs_spin_lock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
- sfw_deactivate_session();
- LASSERT (sfw_data.fw_session == NULL);
- sfw_data.fw_session = sn;
+ sfw_deactivate_session();
+ LASSERT(sfw_data.fw_session == NULL);
+ sfw_data.fw_session = sn;
- cfs_spin_unlock(&sfw_data.fw_lock);
+ spin_unlock(&sfw_data.fw_lock);
- reply->mksn_status = 0;
- reply->mksn_sid = sn->sn_id;
- reply->mksn_timeout = sn->sn_timeout;
- return 0;
+ reply->mksn_status = 0;
+ reply->mksn_sid = sn->sn_id;
+ reply->mksn_timeout = sn->sn_timeout;
+ return 0;
}
int
return 0;
}
- cfs_spin_lock(&sfw_data.fw_lock);
- sfw_deactivate_session();
- cfs_spin_unlock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
+ sfw_deactivate_session();
+ spin_unlock(&sfw_data.fw_lock);
- reply->rmsn_status = 0;
- reply->rmsn_sid = LST_INVALID_SID;
- LASSERT (sfw_data.fw_session == NULL);
- return 0;
+ reply->rmsn_status = 0;
+ reply->rmsn_sid = LST_INVALID_SID;
+ LASSERT(sfw_data.fw_session == NULL);
+ return 0;
}
int
}
memset(tsi, 0, sizeof(*tsi));
- cfs_spin_lock_init(&tsi->tsi_lock);
+ spin_lock_init(&tsi->tsi_lock);
cfs_atomic_set(&tsi->tsi_nactive, 0);
CFS_INIT_LIST_HEAD(&tsi->tsi_units);
CFS_INIT_LIST_HEAD(&tsi->tsi_free_rpcs);
return;
/* the test instance is done */
- cfs_spin_lock(&tsi->tsi_lock);
+ spin_lock(&tsi->tsi_lock);
- tsi->tsi_stopping = 0;
+ tsi->tsi_stopping = 0;
- cfs_spin_unlock(&tsi->tsi_lock);
+ spin_unlock(&tsi->tsi_lock);
- cfs_spin_lock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
- if (!cfs_atomic_dec_and_test(&tsb->bat_nactive) ||/* tsb still active */
- sn == sfw_data.fw_session) { /* sn also active */
- cfs_spin_unlock(&sfw_data.fw_lock);
+ if (!cfs_atomic_dec_and_test(&tsb->bat_nactive) || /* tsb still active */
+ sn == sfw_data.fw_session) { /* sn also active */
+ spin_unlock(&sfw_data.fw_lock);
return;
}
cfs_list_for_each_entry_typed (tsb, &sn->sn_batches,
sfw_batch_t, bat_list) {
if (sfw_batch_active(tsb)) {
- cfs_spin_unlock(&sfw_data.fw_lock);
- return;
- }
- }
+ spin_unlock(&sfw_data.fw_lock);
+ return;
+ }
+ }
- cfs_list_del_init(&sn->sn_list);
- cfs_spin_unlock(&sfw_data.fw_lock);
+ cfs_list_del_init(&sn->sn_list);
+ spin_unlock(&sfw_data.fw_lock);
- sfw_destroy_session(sn);
- return;
+ sfw_destroy_session(sn);
+ return;
}
void
tsi->tsi_ops->tso_done_rpc(tsu, rpc);
- cfs_spin_lock(&tsi->tsi_lock);
+ spin_lock(&tsi->tsi_lock);
LASSERT (sfw_test_active(tsi));
LASSERT (!cfs_list_empty(&rpc->crpc_list));
/* dec ref for poster */
srpc_client_rpc_decref(rpc);
- cfs_spin_unlock(&tsi->tsi_lock);
+ spin_unlock(&tsi->tsi_lock);
if (!done) {
swi_schedule_workitem(&tsu->tsu_worker);
unsigned features, int nblk, int blklen,
srpc_client_rpc_t **rpcpp)
{
- srpc_client_rpc_t *rpc = NULL;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
+ srpc_client_rpc_t *rpc = NULL;
+ sfw_test_instance_t *tsi = tsu->tsu_instance;
- cfs_spin_lock(&tsi->tsi_lock);
+ spin_lock(&tsi->tsi_lock);
LASSERT (sfw_test_active(tsi));
cfs_list_del_init(&rpc->crpc_list);
}
- cfs_spin_unlock(&tsi->tsi_lock);
+ spin_unlock(&tsi->tsi_lock);
if (rpc == NULL) {
rpc = srpc_create_client_rpc(peer, tsi->tsi_service, nblk,
LASSERT (rpc != NULL);
- cfs_spin_lock(&tsi->tsi_lock);
+ spin_lock(&tsi->tsi_lock);
- if (tsi->tsi_stopping) {
- cfs_list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
- cfs_spin_unlock(&tsi->tsi_lock);
- goto test_done;
- }
+ if (tsi->tsi_stopping) {
+ cfs_list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
+ spin_unlock(&tsi->tsi_lock);
+ goto test_done;
+ }
- if (tsu->tsu_loop > 0)
- tsu->tsu_loop--;
+ if (tsu->tsu_loop > 0)
+ tsu->tsu_loop--;
- cfs_list_add_tail(&rpc->crpc_list, &tsi->tsi_active_rpcs);
- cfs_spin_unlock(&tsi->tsi_lock);
+ cfs_list_add_tail(&rpc->crpc_list, &tsi->tsi_active_rpcs);
+ spin_unlock(&tsi->tsi_lock);
- rpc->crpc_timeout = rpc_timeout;
+ rpc->crpc_timeout = rpc_timeout;
- cfs_spin_lock(&rpc->crpc_lock);
- srpc_post_rpc(rpc);
- cfs_spin_unlock(&rpc->crpc_lock);
- return 0;
+ spin_lock(&rpc->crpc_lock);
+ srpc_post_rpc(rpc);
+ spin_unlock(&rpc->crpc_lock);
+ return 0;
test_done:
/*
cfs_list_for_each_entry_typed (tsi, &tsb->bat_tests,
sfw_test_instance_t, tsi_list) {
- cfs_spin_lock(&tsi->tsi_lock);
+ spin_lock(&tsi->tsi_lock);
- if (!tsi->tsi_is_client ||
- !sfw_test_active(tsi) || tsi->tsi_stopping) {
- cfs_spin_unlock(&tsi->tsi_lock);
- continue;
- }
+ if (!tsi->tsi_is_client ||
+ !sfw_test_active(tsi) || tsi->tsi_stopping) {
+ spin_unlock(&tsi->tsi_lock);
+ continue;
+ }
- tsi->tsi_stopping = 1;
+ tsi->tsi_stopping = 1;
- if (!force) {
- cfs_spin_unlock(&tsi->tsi_lock);
- continue;
- }
+ if (!force) {
+ spin_unlock(&tsi->tsi_lock);
+ continue;
+ }
- /* abort launched rpcs in the test */
- cfs_list_for_each_entry_typed (rpc, &tsi->tsi_active_rpcs,
- srpc_client_rpc_t, crpc_list) {
- cfs_spin_lock(&rpc->crpc_lock);
+ /* abort launched rpcs in the test */
+ cfs_list_for_each_entry_typed(rpc, &tsi->tsi_active_rpcs,
+ srpc_client_rpc_t, crpc_list) {
+ spin_lock(&rpc->crpc_lock);
- srpc_abort_rpc(rpc, -EINTR);
+ srpc_abort_rpc(rpc, -EINTR);
- cfs_spin_unlock(&rpc->crpc_lock);
- }
+ spin_unlock(&rpc->crpc_lock);
+ }
- cfs_spin_unlock(&tsi->tsi_lock);
- }
+ spin_unlock(&tsi->tsi_lock);
+ }
- return 0;
+ return 0;
}
int
unsigned features = LST_FEATS_MASK;
int rc = 0;
- LASSERT (sfw_data.fw_active_srpc == NULL);
- LASSERT (sv->sv_id <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
+ LASSERT(sfw_data.fw_active_srpc == NULL);
+ LASSERT(sv->sv_id <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
- cfs_spin_lock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
- if (sfw_data.fw_shuttingdown) {
- cfs_spin_unlock(&sfw_data.fw_lock);
- return -ESHUTDOWN;
- }
+ if (sfw_data.fw_shuttingdown) {
+ spin_unlock(&sfw_data.fw_lock);
+ return -ESHUTDOWN;
+ }
- /* Remove timer to avoid racing with it or expiring active session */
- if (sfw_del_session_timer() != 0) {
- CERROR ("Dropping RPC (%s) from %s: racing with expiry timer.",
- sv->sv_name, libcfs_id2str(rpc->srpc_peer));
- cfs_spin_unlock(&sfw_data.fw_lock);
- return -EAGAIN;
- }
+ /* Remove timer to avoid racing with it or expiring active session */
+ if (sfw_del_session_timer() != 0) {
+ CERROR("Dropping RPC (%s) from %s: racing with expiry timer.",
+ sv->sv_name, libcfs_id2str(rpc->srpc_peer));
+ spin_unlock(&sfw_data.fw_lock);
+ return -EAGAIN;
+ }
- sfw_data.fw_active_srpc = rpc;
- cfs_spin_unlock(&sfw_data.fw_lock);
+ sfw_data.fw_active_srpc = rpc;
+ spin_unlock(&sfw_data.fw_lock);
- sfw_unpack_message(request);
- LASSERT (request->msg_type == srpc_service2request(sv->sv_id));
+ sfw_unpack_message(request);
+ LASSERT(request->msg_type == srpc_service2request(sv->sv_id));
/* rpc module should have checked this */
LASSERT(request->msg_version == SRPC_MSG_VERSION);
features = sfw_data.fw_session->sn_features;
out:
reply->msg_ses_feats = features;
- rpc->srpc_done = sfw_server_rpc_done;
- cfs_spin_lock(&sfw_data.fw_lock);
+ rpc->srpc_done = sfw_server_rpc_done;
+ spin_lock(&sfw_data.fw_lock);
#ifdef __KERNEL__
- if (!sfw_data.fw_shuttingdown)
- sfw_add_session_timer();
+ if (!sfw_data.fw_shuttingdown)
+ sfw_add_session_timer();
#else
- LASSERT (!sfw_data.fw_shuttingdown);
- sfw_add_session_timer();
+ LASSERT(!sfw_data.fw_shuttingdown);
+ sfw_add_session_timer();
#endif
- sfw_data.fw_active_srpc = NULL;
- cfs_spin_unlock(&sfw_data.fw_lock);
- return rc;
+ sfw_data.fw_active_srpc = NULL;
+ spin_unlock(&sfw_data.fw_lock);
+ return rc;
}
int
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
int rc;
- LASSERT (rpc->srpc_bulk != NULL);
- LASSERT (sv->sv_id == SRPC_SERVICE_TEST);
- LASSERT (sfw_data.fw_active_srpc == NULL);
- LASSERT (rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst.tsr_is_client);
+ LASSERT(rpc->srpc_bulk != NULL);
+ LASSERT(sv->sv_id == SRPC_SERVICE_TEST);
+ LASSERT(sfw_data.fw_active_srpc == NULL);
+ LASSERT(rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst.tsr_is_client);
- cfs_spin_lock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
- if (status != 0) {
- CERROR ("Bulk transfer failed for RPC: "
- "service %s, peer %s, status %d\n",
- sv->sv_name, libcfs_id2str(rpc->srpc_peer), status);
- cfs_spin_unlock(&sfw_data.fw_lock);
- return -EIO;
- }
+ if (status != 0) {
+ CERROR("Bulk transfer failed for RPC: "
+ "service %s, peer %s, status %d\n",
+ sv->sv_name, libcfs_id2str(rpc->srpc_peer), status);
+ spin_unlock(&sfw_data.fw_lock);
+ return -EIO;
+ }
- if (sfw_data.fw_shuttingdown) {
- cfs_spin_unlock(&sfw_data.fw_lock);
- return -ESHUTDOWN;
- }
+ if (sfw_data.fw_shuttingdown) {
+ spin_unlock(&sfw_data.fw_lock);
+ return -ESHUTDOWN;
+ }
- if (sfw_del_session_timer() != 0) {
- CERROR ("Dropping RPC (%s) from %s: racing with expiry timer",
- sv->sv_name, libcfs_id2str(rpc->srpc_peer));
- cfs_spin_unlock(&sfw_data.fw_lock);
- return -EAGAIN;
- }
+ if (sfw_del_session_timer() != 0) {
+ CERROR("Dropping RPC (%s) from %s: racing with expiry timer",
+ sv->sv_name, libcfs_id2str(rpc->srpc_peer));
+ spin_unlock(&sfw_data.fw_lock);
+ return -EAGAIN;
+ }
- sfw_data.fw_active_srpc = rpc;
- cfs_spin_unlock(&sfw_data.fw_lock);
+ sfw_data.fw_active_srpc = rpc;
+ spin_unlock(&sfw_data.fw_lock);
- rc = sfw_add_test(rpc);
+ rc = sfw_add_test(rpc);
- cfs_spin_lock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
#ifdef __KERNEL__
- if (!sfw_data.fw_shuttingdown)
- sfw_add_session_timer();
+ if (!sfw_data.fw_shuttingdown)
+ sfw_add_session_timer();
#else
- LASSERT (!sfw_data.fw_shuttingdown);
- sfw_add_session_timer();
+ LASSERT(!sfw_data.fw_shuttingdown);
+ sfw_add_session_timer();
#endif
- sfw_data.fw_active_srpc = NULL;
- cfs_spin_unlock(&sfw_data.fw_lock);
- return rc;
+ sfw_data.fw_active_srpc = NULL;
+ spin_unlock(&sfw_data.fw_lock);
+ return rc;
}
srpc_client_rpc_t *
{
srpc_client_rpc_t *rpc = NULL;
- cfs_spin_lock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
LASSERT (!sfw_data.fw_shuttingdown);
LASSERT (service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
done, sfw_client_rpc_fini, priv);
}
- cfs_spin_unlock(&sfw_data.fw_lock);
+ spin_unlock(&sfw_data.fw_lock);
if (rpc == NULL) {
rpc = srpc_create_client_rpc(peer, service,
void
sfw_abort_rpc (srpc_client_rpc_t *rpc)
{
- LASSERT (cfs_atomic_read(&rpc->crpc_refcount) > 0);
- LASSERT (rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
+ LASSERT(cfs_atomic_read(&rpc->crpc_refcount) > 0);
+ LASSERT(rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
- cfs_spin_lock(&rpc->crpc_lock);
- srpc_abort_rpc(rpc, -EINTR);
- cfs_spin_unlock(&rpc->crpc_lock);
- return;
+ spin_lock(&rpc->crpc_lock);
+ srpc_abort_rpc(rpc, -EINTR);
+ spin_unlock(&rpc->crpc_lock);
+ return;
}
void
sfw_post_rpc (srpc_client_rpc_t *rpc)
{
- cfs_spin_lock(&rpc->crpc_lock);
+ spin_lock(&rpc->crpc_lock);
LASSERT (!rpc->crpc_closed);
LASSERT (!rpc->crpc_aborted);
rpc->crpc_timeout = rpc_timeout;
srpc_post_rpc(rpc);
- cfs_spin_unlock(&rpc->crpc_lock);
- return;
+ spin_unlock(&rpc->crpc_lock);
+ return;
}
static srpc_service_t sfw_services[] =
sfw_data.fw_session = NULL;
sfw_data.fw_active_srpc = NULL;
- cfs_spin_lock_init(&sfw_data.fw_lock);
+ spin_lock_init(&sfw_data.fw_lock);
cfs_atomic_set(&sfw_data.fw_nzombies, 0);
CFS_INIT_LIST_HEAD(&sfw_data.fw_tests);
CFS_INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
void
sfw_shutdown (void)
{
- srpc_service_t *sv;
- sfw_test_case_t *tsc;
- int i;
+ srpc_service_t *sv;
+ sfw_test_case_t *tsc;
+ int i;
- cfs_spin_lock(&sfw_data.fw_lock);
+ spin_lock(&sfw_data.fw_lock);
sfw_data.fw_shuttingdown = 1;
#ifdef __KERNEL__
"waiting for %d zombie sessions to die.\n",
cfs_atomic_read(&sfw_data.fw_nzombies));
- cfs_spin_unlock(&sfw_data.fw_lock);
+ spin_unlock(&sfw_data.fw_lock);
for (i = 0; ; i++) {
sv = &sfw_services[i];
CFS_MODULE_PARM(ping_srv_workitems, "i", int, 0644, "# PING server workitems");
typedef struct {
- cfs_spinlock_t pnd_lock; /* serialize */
- int pnd_counter; /* sequence counter */
+ spinlock_t pnd_lock; /* serialize */
+ int pnd_counter; /* sequence counter */
} lst_ping_data_t;
static lst_ping_data_t lst_ping_data;
LASSERT(tsi->tsi_is_client);
LASSERT(sn != NULL && (sn->sn_features & ~LST_FEATS_MASK) == 0);
- cfs_spin_lock_init(&lst_ping_data.pnd_lock);
- lst_ping_data.pnd_counter = 0;
+ spin_lock_init(&lst_ping_data.pnd_lock);
+ lst_ping_data.pnd_counter = 0;
- return 0;
+ return 0;
}
static void
req->pnr_magic = LST_PING_TEST_MAGIC;
- cfs_spin_lock(&lst_ping_data.pnd_lock);
- req->pnr_seq = lst_ping_data.pnd_counter ++;
- cfs_spin_unlock(&lst_ping_data.pnd_lock);
+ spin_lock(&lst_ping_data.pnd_lock);
+ req->pnr_seq = lst_ping_data.pnd_counter++;
+ spin_unlock(&lst_ping_data.pnd_lock);
- cfs_fs_timeval(&tv);
- req->pnr_time_sec = tv.tv_sec;
- req->pnr_time_usec = tv.tv_usec;
+ cfs_fs_timeval(&tv);
+ req->pnr_time_sec = tv.tv_sec;
+ req->pnr_time_usec = tv.tv_usec;
- return rc;
+ return rc;
}
static void
} srpc_state_t;
struct smoketest_rpc {
- cfs_spinlock_t rpc_glock; /* global lock */
- srpc_service_t *rpc_services[SRPC_SERVICE_MAX_ID + 1];
- lnet_handle_eq_t rpc_lnet_eq; /* _the_ LNet event queue */
- srpc_state_t rpc_state;
- srpc_counters_t rpc_counters;
- __u64 rpc_matchbits; /* matchbits counter */
+ spinlock_t rpc_glock; /* global lock */
+ srpc_service_t *rpc_services[SRPC_SERVICE_MAX_ID + 1];
+ lnet_handle_eq_t rpc_lnet_eq; /* _the_ LNet event queue */
+ srpc_state_t rpc_state;
+ srpc_counters_t rpc_counters;
+ __u64 rpc_matchbits; /* matchbits counter */
} srpc_data;
static inline int
void srpc_get_counters (srpc_counters_t *cnt)
{
- cfs_spin_lock(&srpc_data.rpc_glock);
- *cnt = srpc_data.rpc_counters;
- cfs_spin_unlock(&srpc_data.rpc_glock);
+ spin_lock(&srpc_data.rpc_glock);
+ *cnt = srpc_data.rpc_counters;
+ spin_unlock(&srpc_data.rpc_glock);
}
void srpc_set_counters (const srpc_counters_t *cnt)
{
- cfs_spin_lock(&srpc_data.rpc_glock);
- srpc_data.rpc_counters = *cnt;
- cfs_spin_unlock(&srpc_data.rpc_glock);
+ spin_lock(&srpc_data.rpc_glock);
+ srpc_data.rpc_counters = *cnt;
+ spin_unlock(&srpc_data.rpc_glock);
}
int
static inline __u64
srpc_next_id (void)
{
- __u64 id;
+ __u64 id;
- cfs_spin_lock(&srpc_data.rpc_glock);
- id = srpc_data.rpc_matchbits++;
- cfs_spin_unlock(&srpc_data.rpc_glock);
- return id;
+ spin_lock(&srpc_data.rpc_glock);
+ id = srpc_data.rpc_matchbits++;
+ spin_unlock(&srpc_data.rpc_glock);
+ return id;
}
void
cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
scd->scd_cpt = i;
scd->scd_svc = svc;
- cfs_spin_lock_init(&scd->scd_lock);
+ spin_lock_init(&scd->scd_lock);
CFS_INIT_LIST_HEAD(&scd->scd_rpc_free);
CFS_INIT_LIST_HEAD(&scd->scd_rpc_active);
CFS_INIT_LIST_HEAD(&scd->scd_buf_posted);
if (srpc_service_init(sv) != 0)
return -ENOMEM;
- cfs_spin_lock(&srpc_data.rpc_glock);
+ spin_lock(&srpc_data.rpc_glock);
LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);
if (srpc_data.rpc_services[id] != NULL) {
- cfs_spin_unlock(&srpc_data.rpc_glock);
+ spin_unlock(&srpc_data.rpc_glock);
goto failed;
}
srpc_data.rpc_services[id] = sv;
- cfs_spin_unlock(&srpc_data.rpc_glock);
+ spin_unlock(&srpc_data.rpc_glock);
CDEBUG(D_NET, "Adding service: id %d, name %s\n", id, sv->sv_name);
return 0;
int
srpc_remove_service (srpc_service_t *sv)
{
- int id = sv->sv_id;
+ int id = sv->sv_id;
- cfs_spin_lock(&srpc_data.rpc_glock);
+ spin_lock(&srpc_data.rpc_glock);
- if (srpc_data.rpc_services[id] != sv) {
- cfs_spin_unlock(&srpc_data.rpc_glock);
- return -ENOENT;
- }
+ if (srpc_data.rpc_services[id] != sv) {
+ spin_unlock(&srpc_data.rpc_glock);
+ return -ENOENT;
+ }
- srpc_data.rpc_services[id] = NULL;
- cfs_spin_unlock(&srpc_data.rpc_glock);
- return 0;
+ srpc_data.rpc_services[id] = NULL;
+ spin_unlock(&srpc_data.rpc_glock);
+ return 0;
}
int
LNetInvalidateHandle(&buf->buf_mdh);
cfs_list_add(&buf->buf_list, &scd->scd_buf_posted);
scd->scd_buf_nposted++;
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
rc = srpc_post_passive_rqtbuf(sv->sv_id,
!srpc_serv_is_framework(sv),
* msg and its event handler has been called. So we must add
* buf to scd_buf_posted _before_ dropping scd_lock */
- cfs_spin_lock(&scd->scd_lock);
+ spin_lock(&scd->scd_lock);
if (rc == 0) {
if (!sv->sv_shuttingdown)
return 0;
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
/* srpc_shutdown_service might have tried to unlink me
* when my buf_mdh was still invalid */
LNetMDUnlink(buf->buf_mdh);
- cfs_spin_lock(&scd->scd_lock);
+ spin_lock(&scd->scd_lock);
return 0;
}
return rc; /* don't allow to change scd_buf_posted */
cfs_list_del(&buf->buf_list);
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
LIBCFS_FREE(buf, sizeof(*buf));
- cfs_spin_lock(&scd->scd_lock);
+ spin_lock(&scd->scd_lock);
return rc;
}
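/*
 * Illustrative sketch of the ordering rule in the comment above, using
 * hypothetical names (pb_ctx, pb_buf, pb_post): the buffer is linked on
 * the posted list *before* the lock is dropped and the buffer is handed
 * to the network, because the completion handler may run immediately
 * and expects to find the buffer on that list.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct pb_buf {
	struct list_head	pb_list;
};

struct pb_ctx {
	spinlock_t		pb_lock;
	struct list_head	pb_posted;
	int			pb_nposted;
};

int pb_post(struct pb_buf *buf);	/* hands buf to the network layer */

static int pb_post_buffer(struct pb_ctx *ctx, struct pb_buf *buf)
{
	int rc;

	spin_lock(&ctx->pb_lock);
	list_add(&buf->pb_list, &ctx->pb_posted);	/* publish first */
	ctx->pb_nposted++;
	spin_unlock(&ctx->pb_lock);

	rc = pb_post(buf);	/* handler may already see buf on the list */
	if (rc != 0) {
		/* never handed off: safe to unlink again */
		spin_lock(&ctx->pb_lock);
		list_del(&buf->pb_list);
		ctx->pb_nposted--;
		spin_unlock(&ctx->pb_lock);
	}
	return rc;
}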
/* it's called by workitem scheduler threads, these threads
* should have been set CPT affinity, so buffers will be posted
* on CPT local list of Portal */
- cfs_spin_lock(&scd->scd_lock);
+ spin_lock(&scd->scd_lock);
while (scd->scd_buf_adjust > 0 &&
!scd->scd_svc->sv_shuttingdown) {
scd->scd_buf_adjust--; /* consume it */
scd->scd_buf_posting++;
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
LIBCFS_ALLOC(buf, sizeof(*buf));
if (buf == NULL) {
CERROR("Failed to add new buf to service: %s\n",
scd->scd_svc->sv_name);
- cfs_spin_lock(&scd->scd_lock);
+ spin_lock(&scd->scd_lock);
rc = -ENOMEM;
break;
}
- cfs_spin_lock(&scd->scd_lock);
+ spin_lock(&scd->scd_lock);
if (scd->scd_svc->sv_shuttingdown) {
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
LIBCFS_FREE(buf, sizeof(*buf));
- cfs_spin_lock(&scd->scd_lock);
+ spin_lock(&scd->scd_lock);
rc = -ESHUTDOWN;
break;
}
scd->scd_buf_posting--;
}
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
return 0;
}
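/*
 * Illustrative sketch of the grow-buffers loop above, with hypothetical
 * names (gb_ctx, gb_buf): a spinlock cannot be held across a sleeping
 * allocation, so the lock is dropped around kzalloc() and the shutdown
 * flag is re-checked after the lock is re-taken, since the state may
 * have changed in between.
 */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct gb_buf {
	struct list_head	gb_list;
};

struct gb_ctx {
	spinlock_t		gb_lock;
	struct list_head	gb_idle;
	int			gb_wanted;	/* buffers still to add */
	int			gb_shutdown;
};

static int gb_grow_buffers(struct gb_ctx *ctx)
{
	struct gb_buf	*buf;
	int		rc = 0;

	spin_lock(&ctx->gb_lock);
	while (ctx->gb_wanted > 0 && !ctx->gb_shutdown) {
		ctx->gb_wanted--;
		spin_unlock(&ctx->gb_lock);

		buf = kzalloc(sizeof(*buf), GFP_KERNEL);	/* may sleep */

		spin_lock(&ctx->gb_lock);
		if (buf == NULL) {
			rc = -ENOMEM;
			break;
		}
		if (ctx->gb_shutdown) {	/* re-check after re-locking */
			kfree(buf);
			rc = -ESHUTDOWN;
			break;
		}
		list_add(&buf->gb_list, &ctx->gb_idle);
	}
	spin_unlock(&ctx->gb_lock);
	return rc;
}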
LASSERTF(nbuffer > 0, "nbuffer must be positive: %d\n", nbuffer);
cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
- cfs_spin_lock(&scd->scd_lock);
+ spin_lock(&scd->scd_lock);
scd->scd_buf_err = 0;
scd->scd_buf_err_stamp = 0;
scd->scd_buf_adjust = nbuffer;
/* start to post buffers */
swi_schedule_workitem(&scd->scd_buf_wi);
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
/* framework service only post buffer for one partition */
if (srpc_serv_is_framework(sv))
}
cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
- cfs_spin_lock(&scd->scd_lock);
+ spin_lock(&scd->scd_lock);
/*
* NB: srpc_service_add_buffers() can be called inside
* thread context of lst_sched_serial, and we don't normally
if (scd->scd_buf_err != 0 && rc == 0)
rc = scd->scd_buf_err;
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
}
return rc;
LASSERT(!sv->sv_shuttingdown);
cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
- cfs_spin_lock(&scd->scd_lock);
+ spin_lock(&scd->scd_lock);
num = scd->scd_buf_total + scd->scd_buf_posting;
scd->scd_buf_adjust -= min(nbuffer, num);
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
}
}
LASSERT(sv->sv_shuttingdown); /* srpc_shutdown_service called */
cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
- cfs_spin_lock(&scd->scd_lock);
+ spin_lock(&scd->scd_lock);
if (!swi_deschedule_workitem(&scd->scd_buf_wi))
return 0;
if (scd->scd_buf_nposted > 0) {
CDEBUG(D_NET, "waiting for %d posted buffers to unlink",
scd->scd_buf_nposted);
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
return 0;
}
if (cfs_list_empty(&scd->scd_rpc_active)) {
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
continue;
}
rpc->srpc_wi.swi_workitem.wi_running,
rpc->srpc_ev.ev_fired, rpc->srpc_ev.ev_type,
rpc->srpc_ev.ev_status, rpc->srpc_ev.ev_lnet);
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
return 0;
}
}
}
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
LIBCFS_FREE(buf, sizeof(*buf));
- cfs_spin_lock(&scd->scd_lock);
+ spin_lock(&scd->scd_lock);
}
void
sv->sv_id, sv->sv_name);
cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
- cfs_spin_lock(&scd->scd_lock);
+ spin_lock(&scd->scd_lock);
/* schedule in-flight RPCs to notice the abort, NB:
* racing with incoming RPCs; complete fix should make test
swi_schedule_workitem(&rpc->srpc_wi);
}
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
}
}
sv->sv_id, sv->sv_name);
cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
- cfs_spin_lock(&scd->scd_lock);
+ spin_lock(&scd->scd_lock);
sv->sv_shuttingdown = 1; /* i.e. no new active RPC */
cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
- cfs_spin_lock(&scd->scd_lock);
+ spin_lock(&scd->scd_lock);
/* schedule in-flight RPCs to notice the shutdown */
cfs_list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list)
swi_schedule_workitem(&rpc->srpc_wi);
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
/* OK to traverse scd_buf_posted without lock, since no one
* touches scd_buf_posted now */
swi_state2str(rpc->srpc_wi.swi_state), status);
if (status != 0) {
- cfs_spin_lock(&srpc_data.rpc_glock);
- srpc_data.rpc_counters.rpcs_dropped++;
- cfs_spin_unlock(&srpc_data.rpc_glock);
- }
+ spin_lock(&srpc_data.rpc_glock);
+ srpc_data.rpc_counters.rpcs_dropped++;
+ spin_unlock(&srpc_data.rpc_glock);
+ }
- if (rpc->srpc_done != NULL)
- (*rpc->srpc_done) (rpc);
- LASSERT (rpc->srpc_bulk == NULL);
+ if (rpc->srpc_done != NULL)
+ (*rpc->srpc_done) (rpc);
+ LASSERT(rpc->srpc_bulk == NULL);
- cfs_spin_lock(&scd->scd_lock);
+ spin_lock(&scd->scd_lock);
if (rpc->srpc_reqstbuf != NULL) {
/* NB might drop sv_lock in srpc_service_recycle_buffer, but
cfs_list_add(&rpc->srpc_list, &scd->scd_rpc_free);
}
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
return;
}
LASSERT(wi == &rpc->srpc_wi);
- cfs_spin_lock(&scd->scd_lock);
+ spin_lock(&scd->scd_lock);
if (sv->sv_shuttingdown || rpc->srpc_aborted) {
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
if (rpc->srpc_bulk != NULL)
LNetMDUnlink(rpc->srpc_bulk->bk_mdh);
return 0;
}
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
switch (wi->swi_state) {
default:
rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
rpc->crpc_timeout);
- cfs_spin_lock(&rpc->crpc_lock);
+ spin_lock(&rpc->crpc_lock);
- rpc->crpc_timeout = 0;
- srpc_abort_rpc(rpc, -ETIMEDOUT);
+ rpc->crpc_timeout = 0;
+ srpc_abort_rpc(rpc, -ETIMEDOUT);
- cfs_spin_unlock(&rpc->crpc_lock);
+ spin_unlock(&rpc->crpc_lock);
- cfs_spin_lock(&srpc_data.rpc_glock);
- srpc_data.rpc_counters.rpcs_expired++;
- cfs_spin_unlock(&srpc_data.rpc_glock);
- return;
+ spin_lock(&srpc_data.rpc_glock);
+ srpc_data.rpc_counters.rpcs_expired++;
+ spin_unlock(&srpc_data.rpc_glock);
}
inline void
void
srpc_del_client_rpc_timer (srpc_client_rpc_t *rpc)
{
- /* timer not planted or already exploded */
- if (rpc->crpc_timeout == 0) return;
+ /* timer not planted or already exploded */
+ if (rpc->crpc_timeout == 0)
+ return;
- /* timer sucessfully defused */
- if (stt_del_timer(&rpc->crpc_timer)) return;
+ /* timer successfully defused */
+ if (stt_del_timer(&rpc->crpc_timer))
+ return;
#ifdef __KERNEL__
- /* timer detonated, wait for it to explode */
- while (rpc->crpc_timeout != 0) {
- cfs_spin_unlock(&rpc->crpc_lock);
+ /* timer detonated, wait for it to explode */
+ while (rpc->crpc_timeout != 0) {
+ spin_unlock(&rpc->crpc_lock);
- cfs_schedule();
+ cfs_schedule();
- cfs_spin_lock(&rpc->crpc_lock);
- }
+ spin_lock(&rpc->crpc_lock);
+ }
#else
- LBUG(); /* impossible in single-threaded runtime */
+ LBUG(); /* impossible in single-threaded runtime */
#endif
- return;
}
void
srpc_client_rpc_done (srpc_client_rpc_t *rpc, int status)
{
- swi_workitem_t *wi = &rpc->crpc_wi;
+ swi_workitem_t *wi = &rpc->crpc_wi;
- LASSERT (status != 0 || wi->swi_state == SWI_STATE_DONE);
+ LASSERT(status != 0 || wi->swi_state == SWI_STATE_DONE);
- cfs_spin_lock(&rpc->crpc_lock);
+ spin_lock(&rpc->crpc_lock);
rpc->crpc_closed = 1;
if (rpc->crpc_status == 0)
LASSERT (!srpc_event_pending(rpc));
swi_exit_workitem(wi);
- cfs_spin_unlock(&rpc->crpc_lock);
+ spin_unlock(&rpc->crpc_lock);
(*rpc->crpc_done)(rpc);
return;
LASSERT (rpc != NULL);
LASSERT (wi == &rpc->crpc_wi);
- cfs_spin_lock(&rpc->crpc_lock);
+ spin_lock(&rpc->crpc_lock);
- if (rpc->crpc_aborted) {
- cfs_spin_unlock(&rpc->crpc_lock);
- goto abort;
- }
+ if (rpc->crpc_aborted) {
+ spin_unlock(&rpc->crpc_lock);
+ goto abort;
+ }
- cfs_spin_unlock(&rpc->crpc_lock);
+ spin_unlock(&rpc->crpc_lock);
switch (wi->swi_state) {
default:
return 1;
}
- if (rc != 0) {
- cfs_spin_lock(&rpc->crpc_lock);
- srpc_abort_rpc(rpc, rc);
- cfs_spin_unlock(&rpc->crpc_lock);
- }
+ if (rc != 0) {
+ spin_lock(&rpc->crpc_lock);
+ srpc_abort_rpc(rpc, rc);
+ spin_unlock(&rpc->crpc_lock);
+ }
abort:
if (rpc->crpc_aborted) {
LASSERT(buffer != NULL);
rpyid = buffer->buf_msg.msg_body.reqst.rpyid;
- cfs_spin_lock(&scd->scd_lock);
+ spin_lock(&scd->scd_lock);
if (!sv->sv_shuttingdown && !srpc_serv_is_framework(sv)) {
/* Repost buffer before replying since test client
rpc->srpc_reqstbuf = NULL;
}
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
ev->ev_fired = 0;
ev->ev_data = rpc;
LASSERT (!cfs_in_interrupt());
if (ev->status != 0) {
- cfs_spin_lock(&srpc_data.rpc_glock);
- srpc_data.rpc_counters.errors++;
- cfs_spin_unlock(&srpc_data.rpc_glock);
+ spin_lock(&srpc_data.rpc_glock);
+ srpc_data.rpc_counters.errors++;
+ spin_unlock(&srpc_data.rpc_glock);
}
rpcev->ev_lnet = ev->type;
LBUG ();
case SRPC_REQUEST_SENT:
if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
- cfs_spin_lock(&srpc_data.rpc_glock);
- srpc_data.rpc_counters.rpcs_sent++;
- cfs_spin_unlock(&srpc_data.rpc_glock);
+ spin_lock(&srpc_data.rpc_glock);
+ srpc_data.rpc_counters.rpcs_sent++;
+ spin_unlock(&srpc_data.rpc_glock);
}
case SRPC_REPLY_RCVD:
case SRPC_BULK_REQ_RCVD:
LBUG ();
}
- cfs_spin_lock(&crpc->crpc_lock);
+ spin_lock(&crpc->crpc_lock);
- LASSERT (rpcev->ev_fired == 0);
- rpcev->ev_fired = 1;
- rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
- -EINTR : ev->status;
- swi_schedule_workitem(&crpc->crpc_wi);
+ LASSERT(rpcev->ev_fired == 0);
+ rpcev->ev_fired = 1;
+ rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
+ -EINTR : ev->status;
+ swi_schedule_workitem(&crpc->crpc_wi);
- cfs_spin_unlock(&crpc->crpc_lock);
- break;
+ spin_unlock(&crpc->crpc_lock);
+ break;
- case SRPC_REQUEST_RCVD:
+ case SRPC_REQUEST_RCVD:
scd = rpcev->ev_data;
sv = scd->scd_svc;
LASSERT(rpcev == &scd->scd_ev);
- cfs_spin_lock(&scd->scd_lock);
+ spin_lock(&scd->scd_lock);
LASSERT (ev->unlinked);
LASSERT (ev->type == LNET_EVENT_PUT ||
if (sv->sv_shuttingdown) {
/* Leave buffer on scd->scd_buf_nposted since
* srpc_finish_service needs to traverse it. */
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
break;
}
&scd->scd_buf_blocked);
}
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
- cfs_spin_lock(&srpc_data.rpc_glock);
- srpc_data.rpc_counters.rpcs_rcvd++;
- cfs_spin_unlock(&srpc_data.rpc_glock);
+ spin_lock(&srpc_data.rpc_glock);
+ srpc_data.rpc_counters.rpcs_rcvd++;
+ spin_unlock(&srpc_data.rpc_glock);
break;
case SRPC_BULK_GET_RPLD:
case SRPC_BULK_PUT_SENT:
if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
- cfs_spin_lock(&srpc_data.rpc_glock);
+ spin_lock(&srpc_data.rpc_glock);
- if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
- srpc_data.rpc_counters.bulk_get += ev->mlength;
- else
- srpc_data.rpc_counters.bulk_put += ev->mlength;
+ if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
+ srpc_data.rpc_counters.bulk_get += ev->mlength;
+ else
+ srpc_data.rpc_counters.bulk_put += ev->mlength;
- cfs_spin_unlock(&srpc_data.rpc_glock);
- }
- case SRPC_REPLY_SENT:
+ spin_unlock(&srpc_data.rpc_glock);
+ }
+ case SRPC_REPLY_SENT:
srpc = rpcev->ev_data;
scd = srpc->srpc_scd;
LASSERT(rpcev == &srpc->srpc_ev);
- cfs_spin_lock(&scd->scd_lock);
+ spin_lock(&scd->scd_lock);
rpcev->ev_fired = 1;
rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
-EINTR : ev->status;
swi_schedule_workitem(&srpc->srpc_wi);
- cfs_spin_unlock(&scd->scd_lock);
+ spin_unlock(&scd->scd_lock);
break;
}
}
int
srpc_startup (void)
{
- int rc;
+ int rc;
- memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
- cfs_spin_lock_init(&srpc_data.rpc_glock);
+ memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
+ spin_lock_init(&srpc_data.rpc_glock);
/* 1 second pause to avoid timestamp reuse */
cfs_pause(cfs_time_seconds(1));
default:
LBUG ();
case SRPC_STATE_RUNNING:
- cfs_spin_lock(&srpc_data.rpc_glock);
+ spin_lock(&srpc_data.rpc_glock);
for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
srpc_service_t *sv = srpc_data.rpc_services[i];
i, sv->sv_name);
}
- cfs_spin_unlock(&srpc_data.rpc_glock);
+ spin_unlock(&srpc_data.rpc_glock);
stt_shutdown();
/* client-side state of a RPC */
typedef struct srpc_client_rpc {
- cfs_list_t crpc_list; /* chain on user's lists */
- cfs_spinlock_t crpc_lock; /* serialize */
+ cfs_list_t crpc_list; /* chain on user's lists */
+ spinlock_t crpc_lock; /* serialize */
int crpc_service;
cfs_atomic_t crpc_refcount;
int crpc_timeout; /* # seconds to wait for reply */
/* CPU partition data of srpc service */
struct srpc_service_cd {
/** serialize */
- cfs_spinlock_t scd_lock;
+ spinlock_t scd_lock;
/** backref to service */
struct srpc_service *scd_svc;
/** event buffer */
int tsi_concur; /* concurrency */
int tsi_loop; /* loop count */
- /* status of test instance */
- cfs_spinlock_t tsi_lock; /* serialize */
+ /* status of test instance */
+ spinlock_t tsi_lock; /* serialize */
int tsi_stopping:1; /* test is stopping */
cfs_atomic_t tsi_nactive; /* # of active test unit */
cfs_list_t tsi_units; /* test units */
CFS_INIT_LIST_HEAD(&rpc->crpc_list);
swi_init_workitem(&rpc->crpc_wi, rpc, srpc_send_rpc,
lst_sched_test[lnet_cpt_of_nid(peer.nid)]);
- cfs_spin_lock_init(&rpc->crpc_lock);
+ spin_lock_init(&rpc->crpc_lock);
cfs_atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */
rpc->crpc_dest = peer;
#endif
-#define lst_wait_until(cond, lock, fmt, ...) \
-do { \
- int __I = 2; \
- while (!(cond)) { \
- CDEBUG(IS_PO2(++__I) ? D_WARNING : D_NET, \
- fmt, ## __VA_ARGS__); \
- cfs_spin_unlock(&(lock)); \
- \
- selftest_wait_events(); \
- \
- cfs_spin_lock(&(lock)); \
- } \
+#define lst_wait_until(cond, lock, fmt, ...) \
+do { \
+ int __I = 2; \
+ while (!(cond)) { \
+ CDEBUG(IS_PO2(++__I) ? D_WARNING : D_NET, \
+ fmt, ## __VA_ARGS__); \
+ spin_unlock(&(lock)); \
+ \
+ selftest_wait_events(); \
+ \
+ spin_lock(&(lock)); \
+ } \
} while (0)
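/*
 * Hypothetical usage sketch for lst_wait_until(): the caller must hold
 * "lock" when invoking the macro; every failed poll drops the lock,
 * waits for events, and re-takes it, so the condition is always
 * re-evaluated under the lock.  md_data and lst_drain() are made-up
 * names for illustration only.
 */
static struct {
	spinlock_t	md_lock;
	int		md_nactive;
} md_data;

static void lst_drain(void)
{
	spin_lock(&md_data.md_lock);
	lst_wait_until(md_data.md_nactive == 0, md_data.md_lock,
		       "still waiting for %d active requests\n",
		       md_data.md_nactive);
	spin_unlock(&md_data.md_lock);
}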
static inline void
* sorted by increasing expiry time. The number of slots is 2**7 (128),
* to cover a time period of 1024 seconds into the future before wrapping.
*/
-#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */
-#define STTIMER_SLOTTIME (1 << STTIMER_MINPOLL)
-#define STTIMER_SLOTTIMEMASK (~(STTIMER_SLOTTIME - 1))
-#define STTIMER_NSLOTS (1 << 7)
-#define STTIMER_SLOT(t) (&stt_data.stt_hash[(((t) >> STTIMER_MINPOLL) & \
+#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */
+#define STTIMER_SLOTTIME (1 << STTIMER_MINPOLL)
+#define STTIMER_SLOTTIMEMASK (~(STTIMER_SLOTTIME - 1))
+#define STTIMER_NSLOTS (1 << 7)
+#define STTIMER_SLOT(t) (&stt_data.stt_hash[(((t) >> STTIMER_MINPOLL) & \
(STTIMER_NSLOTS - 1))])
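/*
 * Worked example of the slot arithmetic above (standalone user-space
 * sketch, not selftest code): a deadline in seconds is shifted right by
 * STTIMER_MINPOLL (3) and masked with STTIMER_NSLOTS - 1 (127), so each
 * slot spans 8 seconds and the 128-slot wheel wraps every 1024 seconds.
 */
#include <stdio.h>

#define MINPOLL	3		/* log2 of the 8-second slot width */
#define NSLOTS	(1 << 7)	/* 128 slots */

static unsigned int slot_of(unsigned long expires)
{
	return (expires >> MINPOLL) & (NSLOTS - 1);
}

int main(void)
{
	/* 1000003 >> 3 = 125000; 125000 & 127 = 72 */
	printf("slot(1000003) = %u\n", slot_of(1000003));
	/* a deadline 1024 seconds later lands in the same slot (72) */
	printf("slot(1001027) = %u\n", slot_of(1000003 + 1024));
	return 0;
}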
struct st_timer_data {
- cfs_spinlock_t stt_lock;
+ spinlock_t stt_lock;
/* start time of the slot processed previously */
cfs_time_t stt_prev_slot;
cfs_list_t stt_hash[STTIMER_NSLOTS];
} stt_data;
void
-stt_add_timer (stt_timer_t *timer)
+stt_add_timer(stt_timer_t *timer)
{
- cfs_list_t *pos;
+ cfs_list_t *pos;
- cfs_spin_lock(&stt_data.stt_lock);
+ spin_lock(&stt_data.stt_lock);
#ifdef __KERNEL__
LASSERT (stt_data.stt_nthreads > 0);
}
cfs_list_add(&timer->stt_list, pos);
- cfs_spin_unlock(&stt_data.stt_lock);
+ spin_unlock(&stt_data.stt_lock);
}
/*
int
stt_del_timer (stt_timer_t *timer)
{
- int ret = 0;
+ int ret = 0;
- cfs_spin_lock(&stt_data.stt_lock);
+ spin_lock(&stt_data.stt_lock);
#ifdef __KERNEL__
LASSERT (stt_data.stt_nthreads > 0);
cfs_list_del_init(&timer->stt_list);
}
- cfs_spin_unlock(&stt_data.stt_lock);
- return ret;
+ spin_unlock(&stt_data.stt_lock);
+ return ret;
}
/* called with stt_data.stt_lock held */
break;
cfs_list_del_init(&timer->stt_list);
- cfs_spin_unlock(&stt_data.stt_lock);
+ spin_unlock(&stt_data.stt_lock);
- expired++;
- (*timer->stt_func) (timer->stt_data);
-
- cfs_spin_lock(&stt_data.stt_lock);
- }
+ expired++;
+ (*timer->stt_func) (timer->stt_data);
+
+ spin_lock(&stt_data.stt_lock);
+ }
- return expired;
+ return expired;
}
int
now = cfs_time_current_sec();
this_slot = now & STTIMER_SLOTTIMEMASK;
- cfs_spin_lock(&stt_data.stt_lock);
+ spin_lock(&stt_data.stt_lock);
- while (cfs_time_aftereq(this_slot, *last)) {
- expired += stt_expire_list(STTIMER_SLOT(this_slot), now);
- this_slot = cfs_time_sub(this_slot, STTIMER_SLOTTIME);
- }
+ while (cfs_time_aftereq(this_slot, *last)) {
+ expired += stt_expire_list(STTIMER_SLOT(this_slot), now);
+ this_slot = cfs_time_sub(this_slot, STTIMER_SLOTTIME);
+ }
- *last = now & STTIMER_SLOTTIMEMASK;
- cfs_spin_unlock(&stt_data.stt_lock);
- return expired;
+ *last = now & STTIMER_SLOTTIMEMASK;
+ spin_unlock(&stt_data.stt_lock);
+ return expired;
}
#ifdef __KERNEL__
rc);
}
- cfs_spin_lock(&stt_data.stt_lock);
- stt_data.stt_nthreads--;
- cfs_spin_unlock(&stt_data.stt_lock);
- return 0;
+ spin_lock(&stt_data.stt_lock);
+ stt_data.stt_nthreads--;
+ spin_unlock(&stt_data.stt_lock);
+ return 0;
}
int
if (pid < 0)
return (int)pid;
- cfs_spin_lock(&stt_data.stt_lock);
- stt_data.stt_nthreads++;
- cfs_spin_unlock(&stt_data.stt_lock);
- return 0;
+ spin_lock(&stt_data.stt_lock);
+ stt_data.stt_nthreads++;
+ spin_unlock(&stt_data.stt_lock);
+ return 0;
}
#else /* !__KERNEL__ */
stt_data.stt_shuttingdown = 0;
stt_data.stt_prev_slot = cfs_time_current_sec() & STTIMER_SLOTTIMEMASK;
- cfs_spin_lock_init(&stt_data.stt_lock);
+ spin_lock_init(&stt_data.stt_lock);
for (i = 0; i < STTIMER_NSLOTS; i++)
CFS_INIT_LIST_HEAD(&stt_data.stt_hash[i]);
{
int i;
- cfs_spin_lock(&stt_data.stt_lock);
+ spin_lock(&stt_data.stt_lock);
for (i = 0; i < STTIMER_NSLOTS; i++)
LASSERT (cfs_list_empty(&stt_data.stt_hash[i]));
stt_data.stt_nthreads);
#endif
- cfs_spin_unlock(&stt_data.stt_lock);
- return;
+ spin_unlock(&stt_data.stt_lock);
}
return;
}
- if (cfs_mt_atomic_read(&peer->up_refcount) == 2) {
+ if (mt_atomic_read(&peer->up_refcount) == 2) {
int i;
for (i = 0; i < N_CONN_TYPES; i++)
LASSERT (peer->up_conns[i] == NULL);
CFS_INIT_LIST_HEAD (&conn->uc_tx_list);
CFS_INIT_LIST_HEAD (&conn->uc_zcack_list);
pthread_mutex_init(&conn->uc_lock, NULL);
- cfs_mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
+ mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
*connp = conn;
return 0;
CFS_INIT_LIST_HEAD (&conn->uc_tx_list);
CFS_INIT_LIST_HEAD (&conn->uc_zcack_list);
pthread_mutex_init(&conn->uc_lock, NULL);
- cfs_mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
+ mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
*connp = conn;
return 0;
peer->up_incrn_is_set = 0;
peer->up_errored = 0;
peer->up_last_alive = 0;
- cfs_mt_atomic_set (&peer->up_refcount, 1); /* 1 ref for caller */
+ mt_atomic_set(&peer->up_refcount, 1); /* 1 ref for caller */
pthread_mutex_init(&peer->up_lock, NULL);
pthread_mutex_lock(&net->un_lock);
}
/* unblock usocklnd_shutdown() */
- cfs_mt_complete(&pt_data->upt_completion);
+ mt_complete(&pt_data->upt_completion);
return 0;
}
libcfs_sock_release(pt->upt_notifier[1]);
pthread_mutex_destroy(&pt->upt_pollrequests_lock);
- cfs_mt_fini_completion(&pt->upt_completion);
+ mt_fini_completion(&pt->upt_completion);
LIBCFS_FREE (pt->upt_pollfd,
sizeof(struct pollfd) * pt->upt_npollfd);
CFS_INIT_LIST_HEAD (&pt->upt_pollrequests);
CFS_INIT_LIST_HEAD (&pt->upt_stale_list);
pthread_mutex_init(&pt->upt_pollrequests_lock, NULL);
- cfs_mt_init_completion(&pt->upt_completion);
+ mt_init_completion(&pt->upt_completion);
}
/* Initialize peer hash list */
for (i = 0; i < n; i++) {
usock_pollthread_t *pt = &usock_data.ud_pollthreads[i];
usocklnd_wakeup_pollthread(i);
- cfs_mt_wait_for_completion(&pt->upt_completion);
+ mt_wait_for_completion(&pt->upt_completion);
}
pthread_rwlock_destroy(&usock_data.ud_peers_lock);
int uc_sending; /* send op is in progress */
usock_tx_t *uc_tx_hello; /* fake tx with hello */
- cfs_mt_atomic_t uc_refcount; /* # of users */
+ mt_atomic_t uc_refcount; /* # of users */
pthread_mutex_t uc_lock; /* serialize */
int uc_errored; /* a flag for lnet_notify() */
} usock_conn_t;
__u64 up_incarnation; /* peer's incarnation */
int up_incrn_is_set;/* 0 if peer's incarnation
* hasn't been set so far */
- cfs_mt_atomic_t up_refcount; /* # of users */
+ mt_atomic_t up_refcount; /* # of users */
pthread_mutex_t up_lock; /* serialize */
int up_errored; /* a flag for lnet_notify() */
cfs_time_t up_last_alive; /* when the peer was last alive */
cfs_list_t upt_pollrequests; /* list of poll requests */
pthread_mutex_t upt_pollrequests_lock; /* serialize */
int upt_errno; /* non-zero if errored */
- cfs_mt_completion_t upt_completion; /* wait/signal facility for
+ mt_completion_t upt_completion; /* wait/signal facility for
* synchronizing shutdown */
} usock_pollthread_t;
static inline void
usocklnd_conn_addref(usock_conn_t *conn)
{
- LASSERT (cfs_mt_atomic_read(&conn->uc_refcount) > 0);
- cfs_mt_atomic_inc(&conn->uc_refcount);
+ LASSERT(mt_atomic_read(&conn->uc_refcount) > 0);
+ mt_atomic_inc(&conn->uc_refcount);
}
void usocklnd_destroy_conn(usock_conn_t *conn);
static inline void
usocklnd_conn_decref(usock_conn_t *conn)
{
- LASSERT (cfs_mt_atomic_read(&conn->uc_refcount) > 0);
- if (cfs_mt_atomic_dec_and_test(&conn->uc_refcount))
+ LASSERT(mt_atomic_read(&conn->uc_refcount) > 0);
+ if (mt_atomic_dec_and_test(&conn->uc_refcount))
usocklnd_destroy_conn(conn);
}
static inline void
usocklnd_peer_addref(usock_peer_t *peer)
{
- LASSERT (cfs_mt_atomic_read(&peer->up_refcount) > 0);
- cfs_mt_atomic_inc(&peer->up_refcount);
+ LASSERT(mt_atomic_read(&peer->up_refcount) > 0);
+ mt_atomic_inc(&peer->up_refcount);
}
void usocklnd_destroy_peer(usock_peer_t *peer);
static inline void
usocklnd_peer_decref(usock_peer_t *peer)
{
- LASSERT (cfs_mt_atomic_read(&peer->up_refcount) > 0);
- if (cfs_mt_atomic_dec_and_test(&peer->up_refcount))
+ LASSERT(mt_atomic_read(&peer->up_refcount) > 0);
+ if (mt_atomic_dec_and_test(&peer->up_refcount))
usocklnd_destroy_peer(peer);
}
* Ask client for new range, assign that range to ->seq_space and write
* seq state to backing store should be atomic.
*/
- cfs_mutex_lock(&seq->lss_mutex);
+ mutex_lock(&seq->lss_mutex);
if (cli == NULL) {
CDEBUG(D_INFO, "%s: Detached sequence client %s\n",
cli->lcs_space.lsr_index = seq->lss_site->ms_node_id;
EXIT;
out_up:
- cfs_mutex_unlock(&seq->lss_mutex);
+ mutex_unlock(&seq->lss_mutex);
return rc;
}
EXPORT_SYMBOL(seq_server_set_cli);
int rc;
ENTRY;
- cfs_mutex_lock(&seq->lss_mutex);
+ mutex_lock(&seq->lss_mutex);
rc = __seq_server_alloc_super(seq, out, env);
- cfs_mutex_unlock(&seq->lss_mutex);
+ mutex_unlock(&seq->lss_mutex);
RETURN(rc);
}
int rc;
ENTRY;
- cfs_mutex_lock(&seq->lss_mutex);
+ mutex_lock(&seq->lss_mutex);
rc = __seq_server_alloc_meta(seq, out, env);
- cfs_mutex_unlock(&seq->lss_mutex);
+ mutex_unlock(&seq->lss_mutex);
RETURN(rc);
}
range_init(&seq->lss_hiwater_set);
seq->lss_set_width = LUSTRE_SEQ_BATCH_WIDTH;
- cfs_mutex_init(&seq->lss_mutex);
+ mutex_init(&seq->lss_mutex);
seq->lss_width = is_srv ?
LUSTRE_SEQ_META_WIDTH : LUSTRE_SEQ_SUPER_WIDTH;
int rc;
ENTRY;
- cfs_mutex_lock(&seq->lcs_mutex);
+ mutex_lock(&seq->lcs_mutex);
#ifdef __KERNEL__
if (seq->lcs_srv) {
#ifdef __KERNEL__
}
#endif
- cfs_mutex_unlock(&seq->lcs_mutex);
+ mutex_unlock(&seq->lcs_mutex);
RETURN(rc);
}
if (seq->lcs_update) {
cfs_waitq_add(&seq->lcs_waitq, link);
cfs_set_current_state(CFS_TASK_UNINT);
- cfs_mutex_unlock(&seq->lcs_mutex);
+ mutex_unlock(&seq->lcs_mutex);
cfs_waitq_wait(link, CFS_TASK_UNINT);
- cfs_mutex_lock(&seq->lcs_mutex);
+ mutex_lock(&seq->lcs_mutex);
cfs_waitq_del(&seq->lcs_waitq, link);
cfs_set_current_state(CFS_TASK_RUNNING);
return -EAGAIN;
}
++seq->lcs_update;
- cfs_mutex_unlock(&seq->lcs_mutex);
+ mutex_unlock(&seq->lcs_mutex);
return 0;
}
static void seq_fid_alloc_fini(struct lu_client_seq *seq)
{
LASSERT(seq->lcs_update == 1);
- cfs_mutex_lock(&seq->lcs_mutex);
+ mutex_lock(&seq->lcs_mutex);
--seq->lcs_update;
cfs_waitq_signal(&seq->lcs_waitq);
}
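/*
 * Simplified sketch of the "single updater, everyone else waits and
 * retries" scheme shown above, written with the plain Linux waitqueue
 * API instead of the cfs_waitq_* wrappers.  The names (my_alloc,
 * my_alloc_prep, my_alloc_fini) are hypothetical; ma_waitq is assumed
 * to have been set up with init_waitqueue_head().
 */
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/wait.h>

struct my_alloc {
	struct mutex		ma_mutex;
	wait_queue_head_t	ma_waitq;
	int			ma_updating;	/* non-zero while a refill RPC is in flight */
};

static int my_alloc_prep(struct my_alloc *ma)
{
	mutex_lock(&ma->ma_mutex);
	if (ma->ma_updating) {
		/* someone else is refilling: drop the mutex and wait */
		mutex_unlock(&ma->ma_mutex);
		wait_event(ma->ma_waitq, !ma->ma_updating);
		return -EAGAIN;		/* caller re-evaluates its state */
	}
	ma->ma_updating = 1;
	mutex_unlock(&ma->ma_mutex);	/* do the slow RPC without the mutex */
	return 0;
}

static void my_alloc_fini(struct my_alloc *ma)
{
	mutex_lock(&ma->ma_mutex);
	ma->ma_updating = 0;
	wake_up(&ma->ma_waitq);
	mutex_unlock(&ma->ma_mutex);
}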
int rc;
LASSERT(seqnr != NULL);
- cfs_mutex_lock(&seq->lcs_mutex);
+ mutex_lock(&seq->lcs_mutex);
cfs_waitlink_init(&link);
while (1) {
CERROR("%s: Can't allocate new sequence, "
"rc %d\n", seq->lcs_name, rc);
seq_fid_alloc_fini(seq);
- cfs_mutex_unlock(&seq->lcs_mutex);
+ mutex_unlock(&seq->lcs_mutex);
return rc;
}
* to setup FLD for it.
*/
seq_fid_alloc_fini(seq);
- cfs_mutex_unlock(&seq->lcs_mutex);
+ mutex_unlock(&seq->lcs_mutex);
return rc;
}
LASSERT(fid != NULL);
cfs_waitlink_init(&link);
- cfs_mutex_lock(&seq->lcs_mutex);
+ mutex_lock(&seq->lcs_mutex);
if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_EXHAUST))
seq->lcs_fid.f_oid = seq->lcs_width;
CERROR("%s: Can't allocate new sequence, "
"rc %d\n", seq->lcs_name, rc);
seq_fid_alloc_fini(seq);
- cfs_mutex_unlock(&seq->lcs_mutex);
+ mutex_unlock(&seq->lcs_mutex);
RETURN(rc);
}
}
*fid = seq->lcs_fid;
- cfs_mutex_unlock(&seq->lcs_mutex);
+ mutex_unlock(&seq->lcs_mutex);
CDEBUG(D_INFO, "%s: Allocated FID "DFID"\n", seq->lcs_name, PFID(fid));
RETURN(rc);
LASSERT(seq != NULL);
cfs_waitlink_init(&link);
- cfs_mutex_lock(&seq->lcs_mutex);
+ mutex_lock(&seq->lcs_mutex);
while (seq->lcs_update) {
cfs_waitq_add(&seq->lcs_waitq, &link);
cfs_set_current_state(CFS_TASK_UNINT);
- cfs_mutex_unlock(&seq->lcs_mutex);
+ mutex_unlock(&seq->lcs_mutex);
cfs_waitq_wait(&link, CFS_TASK_UNINT);
- cfs_mutex_lock(&seq->lcs_mutex);
+ mutex_lock(&seq->lcs_mutex);
cfs_waitq_del(&seq->lcs_waitq, &link);
cfs_set_current_state(CFS_TASK_RUNNING);
}
seq->lcs_space.lsr_index = -1;
range_init(&seq->lcs_space);
- cfs_mutex_unlock(&seq->lcs_mutex);
+ mutex_unlock(&seq->lcs_mutex);
}
EXPORT_SYMBOL(seq_client_flush);
seq->lcs_exp = exp;
seq->lcs_srv = srv;
seq->lcs_type = type;
- cfs_mutex_init(&seq->lcs_mutex);
+ mutex_init(&seq->lcs_mutex);
seq->lcs_width = LUSTRE_SEQ_MAX_WIDTH;
cfs_waitq_init(&seq->lcs_waitq);
LASSERT(seq != NULL);
- cfs_mutex_lock(&seq->lss_mutex);
+ mutex_lock(&seq->lss_mutex);
rc = seq_proc_write_common(file, buffer, count,
data, &seq->lss_space);
if (rc == 0) {
seq->lss_name, PRANGE(&seq->lss_space));
}
- cfs_mutex_unlock(&seq->lss_mutex);
+ mutex_unlock(&seq->lss_mutex);
RETURN(count);
}
LASSERT(seq != NULL);
- cfs_mutex_lock(&seq->lss_mutex);
+ mutex_lock(&seq->lss_mutex);
rc = seq_proc_read_common(page, start, off, count, eof,
data, &seq->lss_space);
- cfs_mutex_unlock(&seq->lss_mutex);
+ mutex_unlock(&seq->lss_mutex);
RETURN(rc);
}
LASSERT(seq != NULL);
- cfs_mutex_lock(&seq->lss_mutex);
+ mutex_lock(&seq->lss_mutex);
rc = lprocfs_write_helper(buffer, count, &val);
if (rc != 0) {
CDEBUG(D_INFO, "%s: Width: "LPU64"\n",
seq->lss_name, seq->lss_width);
out_unlock:
- cfs_mutex_unlock(&seq->lss_mutex);
+ mutex_unlock(&seq->lss_mutex);
RETURN(count);
}
LASSERT(seq != NULL);
- cfs_mutex_lock(&seq->lss_mutex);
+ mutex_lock(&seq->lss_mutex);
rc = snprintf(page, count, LPU64"\n", seq->lss_width);
- cfs_mutex_unlock(&seq->lss_mutex);
+ mutex_unlock(&seq->lss_mutex);
RETURN(rc);
}
LASSERT(seq != NULL);
- cfs_mutex_lock(&seq->lcs_mutex);
+ mutex_lock(&seq->lcs_mutex);
rc = seq_proc_write_common(file, buffer, count,
data, &seq->lcs_space);
seq->lcs_name, PRANGE(&seq->lcs_space));
}
- cfs_mutex_unlock(&seq->lcs_mutex);
+ mutex_unlock(&seq->lcs_mutex);
RETURN(count);
}
LASSERT(seq != NULL);
- cfs_mutex_lock(&seq->lcs_mutex);
+ mutex_lock(&seq->lcs_mutex);
rc = seq_proc_read_common(page, start, off, count, eof,
data, &seq->lcs_space);
- cfs_mutex_unlock(&seq->lcs_mutex);
+ mutex_unlock(&seq->lcs_mutex);
RETURN(rc);
}
LASSERT(seq != NULL);
- cfs_mutex_lock(&seq->lcs_mutex);
+ mutex_lock(&seq->lcs_mutex);
rc = lprocfs_write_helper(buffer, count, &val);
if (rc) {
- cfs_mutex_unlock(&seq->lcs_mutex);
+ mutex_unlock(&seq->lcs_mutex);
RETURN(rc);
}
}
}
- cfs_mutex_unlock(&seq->lcs_mutex);
+ mutex_unlock(&seq->lcs_mutex);
RETURN(count);
}
LASSERT(seq != NULL);
- cfs_mutex_lock(&seq->lcs_mutex);
+ mutex_lock(&seq->lcs_mutex);
rc = snprintf(page, count, LPU64"\n", seq->lcs_width);
- cfs_mutex_unlock(&seq->lcs_mutex);
+ mutex_unlock(&seq->lcs_mutex);
RETURN(rc);
}
LASSERT(seq != NULL);
- cfs_mutex_lock(&seq->lcs_mutex);
+ mutex_lock(&seq->lcs_mutex);
rc = snprintf(page, count, DFID"\n", PFID(&seq->lcs_fid));
- cfs_mutex_unlock(&seq->lcs_mutex);
+ mutex_unlock(&seq->lcs_mutex);
RETURN(rc);
}
CFS_INIT_LIST_HEAD(&cache->fci_lru);
cache->fci_cache_count = 0;
- cfs_spin_lock_init(&cache->fci_lock);
+ spin_lock_init(&cache->fci_lock);
strncpy(cache->fci_name, name,
sizeof(cache->fci_name));
*/
void fld_cache_flush(struct fld_cache *cache)
{
- ENTRY;
+ ENTRY;
- cfs_spin_lock(&cache->fci_lock);
- cache->fci_cache_size = 0;
- fld_cache_shrink(cache);
- cfs_spin_unlock(&cache->fci_lock);
+ spin_lock(&cache->fci_lock);
+ cache->fci_cache_size = 0;
+ fld_cache_shrink(cache);
+ spin_unlock(&cache->fci_lock);
- EXIT;
+ EXIT;
}
/**
* So we don't need to search new entry before starting insertion loop.
*/
- cfs_spin_lock(&cache->fci_lock);
+ spin_lock(&cache->fci_lock);
fld_cache_shrink(cache);
head = &cache->fci_entries_head;
/* Add new entry to cache and lru list. */
fld_cache_entry_add(cache, f_new, prev);
out:
- cfs_spin_unlock(&cache->fci_lock);
- EXIT;
+ spin_unlock(&cache->fci_lock);
+ EXIT;
}
/**
* lookup \a seq sequence for range in fld cache.
*/
int fld_cache_lookup(struct fld_cache *cache,
- const seqno_t seq, struct lu_seq_range *range)
+ const seqno_t seq, struct lu_seq_range *range)
{
- struct fld_cache_entry *flde;
- cfs_list_t *head;
- ENTRY;
-
+ struct fld_cache_entry *flde;
+ cfs_list_t *head;
+ ENTRY;
- cfs_spin_lock(&cache->fci_lock);
+ spin_lock(&cache->fci_lock);
head = &cache->fci_entries_head;
cache->fci_stat.fst_count++;
/* update position of this entry in lru list. */
cfs_list_move(&flde->fce_lru, &cache->fci_lru);
cache->fci_stat.fst_cache++;
- cfs_spin_unlock(&cache->fci_lock);
- RETURN(0);
- }
- }
- cfs_spin_unlock(&cache->fci_lock);
- RETURN(-ENOENT);
+ spin_unlock(&cache->fci_lock);
+ RETURN(0);
+ }
+ }
+ spin_unlock(&cache->fci_lock);
+ RETURN(-ENOENT);
}
ENTRY;
info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
- cfs_mutex_lock(&fld->lsf_lock);
+ mutex_lock(&fld->lsf_lock);
erange = &info->fti_lrange;
new = &info->fti_irange;
if (rc == 0)
fld_cache_insert(fld->lsf_cache, new);
- cfs_mutex_unlock(&fld->lsf_lock);
+ mutex_unlock(&fld->lsf_lock);
CDEBUG((rc != 0 ? D_ERROR : D_INFO),
"%s: FLD create: given range : "DRANGE
cache_threshold = cache_size *
FLD_SERVER_CACHE_THRESHOLD / 100;
- cfs_mutex_init(&fld->lsf_lock);
+ mutex_init(&fld->lsf_lock);
fld->lsf_cache = fld_cache_init(fld->lsf_name,
cache_size, cache_threshold);
if (IS_ERR(fld->lsf_cache)) {
};
struct fld_cache {
- /**
- * Cache guard, protects fci_hash mostly because others immutable after
- * init is finished.
- */
- cfs_spinlock_t fci_lock;
+ /**
+ * Cache guard; protects fci_hash mostly, since the other fields are
+ * immutable after init is finished.
+ */
+ spinlock_t fci_lock;
/**
* Cache shrink threshold */
};
static struct lu_fld_target *
-fld_client_get_target(struct lu_client_fld *fld,
- seqno_t seq)
+fld_client_get_target(struct lu_client_fld *fld, seqno_t seq)
{
- struct lu_fld_target *target;
- ENTRY;
+ struct lu_fld_target *target;
+ ENTRY;
- LASSERT(fld->lcf_hash != NULL);
+ LASSERT(fld->lcf_hash != NULL);
- cfs_spin_lock(&fld->lcf_lock);
- target = fld->lcf_hash->fh_scan_func(fld, seq);
- cfs_spin_unlock(&fld->lcf_lock);
+ spin_lock(&fld->lcf_lock);
+ target = fld->lcf_hash->fh_scan_func(fld, seq);
+ spin_unlock(&fld->lcf_lock);
if (target != NULL) {
CDEBUG(D_INFO, "%s: Found target (idx "LPU64
if (target == NULL)
RETURN(-ENOMEM);
- cfs_spin_lock(&fld->lcf_lock);
- cfs_list_for_each_entry(tmp, &fld->lcf_targets, ft_chain) {
- if (tmp->ft_idx == tar->ft_idx) {
- cfs_spin_unlock(&fld->lcf_lock);
+ spin_lock(&fld->lcf_lock);
+ cfs_list_for_each_entry(tmp, &fld->lcf_targets, ft_chain) {
+ if (tmp->ft_idx == tar->ft_idx) {
+ spin_unlock(&fld->lcf_lock);
OBD_FREE_PTR(target);
CERROR("Target %s exists in FLD and known as %s:#"LPU64"\n",
name, fld_target_name(tmp), tmp->ft_idx);
&fld->lcf_targets);
fld->lcf_count++;
- cfs_spin_unlock(&fld->lcf_lock);
+ spin_unlock(&fld->lcf_lock);
- RETURN(0);
+ RETURN(0);
}
EXPORT_SYMBOL(fld_client_add_target);
/* Remove export from FLD */
-int fld_client_del_target(struct lu_client_fld *fld,
- __u64 idx)
+int fld_client_del_target(struct lu_client_fld *fld, __u64 idx)
{
- struct lu_fld_target *target, *tmp;
- ENTRY;
+ struct lu_fld_target *target, *tmp;
+ ENTRY;
- cfs_spin_lock(&fld->lcf_lock);
- cfs_list_for_each_entry_safe(target, tmp,
- &fld->lcf_targets, ft_chain) {
- if (target->ft_idx == idx) {
- fld->lcf_count--;
- cfs_list_del(&target->ft_chain);
- cfs_spin_unlock(&fld->lcf_lock);
+ spin_lock(&fld->lcf_lock);
+ cfs_list_for_each_entry_safe(target, tmp,
+ &fld->lcf_targets, ft_chain) {
+ if (target->ft_idx == idx) {
+ fld->lcf_count--;
+ cfs_list_del(&target->ft_chain);
+ spin_unlock(&fld->lcf_lock);
if (target->ft_exp != NULL)
class_export_put(target->ft_exp);
RETURN(0);
}
}
- cfs_spin_unlock(&fld->lcf_lock);
- RETURN(-ENOENT);
+ spin_unlock(&fld->lcf_lock);
+ RETURN(-ENOENT);
}
EXPORT_SYMBOL(fld_client_del_target);
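/*
 * Sketch of the removal pattern used in fld_client_del_target(), with
 * hypothetical names (my_target, my_del_target, my_target_put): the
 * entry is unlinked while the spinlock is held, but the reference is
 * released only after the lock has been dropped, since the release may
 * sleep.
 */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_target {
	struct list_head	mt_chain;
	unsigned long long	mt_idx;
};

static void my_target_put(struct my_target *tgt)
{
	kfree(tgt);		/* stand-in for a real, possibly sleeping, release */
}

static int my_del_target(struct list_head *head, spinlock_t *lock,
			 unsigned long long idx)
{
	struct my_target *tgt, *tmp;

	spin_lock(lock);
	list_for_each_entry_safe(tgt, tmp, head, mt_chain) {
		if (tgt->mt_idx != idx)
			continue;
		list_del(&tgt->mt_chain);
		spin_unlock(lock);
		my_target_put(tgt);	/* done outside the lock */
		return 0;
	}
	spin_unlock(lock);
	return -ENOENT;
}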
}
fld->lcf_count = 0;
- cfs_spin_lock_init(&fld->lcf_lock);
+ spin_lock_init(&fld->lcf_lock);
fld->lcf_hash = &fld_hash[hash];
fld->lcf_flags = LUSTRE_FLD_INIT;
CFS_INIT_LIST_HEAD(&fld->lcf_targets);
void fld_client_fini(struct lu_client_fld *fld)
{
- struct lu_fld_target *target, *tmp;
- ENTRY;
+ struct lu_fld_target *target, *tmp;
+ ENTRY;
- cfs_spin_lock(&fld->lcf_lock);
+ spin_lock(&fld->lcf_lock);
cfs_list_for_each_entry_safe(target, tmp,
&fld->lcf_targets, ft_chain) {
fld->lcf_count--;
class_export_put(target->ft_exp);
OBD_FREE_PTR(target);
}
- cfs_spin_unlock(&fld->lcf_lock);
+ spin_unlock(&fld->lcf_lock);
if (fld->lcf_cache != NULL) {
if (!IS_ERR(fld->lcf_cache))
int total = 0, rc;
ENTRY;
- LASSERT(fld != NULL);
+ LASSERT(fld != NULL);
- cfs_spin_lock(&fld->lcf_lock);
+ spin_lock(&fld->lcf_lock);
cfs_list_for_each_entry(target,
&fld->lcf_targets, ft_chain)
{
if (count == 0)
break;
}
- cfs_spin_unlock(&fld->lcf_lock);
+ spin_unlock(&fld->lcf_lock);
RETURN(total);
}
int rc;
ENTRY;
- LASSERT(fld != NULL);
+ LASSERT(fld != NULL);
- cfs_spin_lock(&fld->lcf_lock);
- rc = snprintf(page, count, "%s\n",
- fld->lcf_hash->fh_name);
- cfs_spin_unlock(&fld->lcf_lock);
+ spin_lock(&fld->lcf_lock);
+ rc = snprintf(page, count, "%s\n", fld->lcf_hash->fh_name);
+ spin_unlock(&fld->lcf_lock);
RETURN(rc);
}
}
}
- if (hash != NULL) {
- cfs_spin_lock(&fld->lcf_lock);
- fld->lcf_hash = hash;
- cfs_spin_unlock(&fld->lcf_lock);
+ if (hash != NULL) {
+ spin_lock(&fld->lcf_lock);
+ fld->lcf_hash = hash;
+ spin_unlock(&fld->lcf_lock);
- CDEBUG(D_INFO, "%s: Changed hash to \"%s\"\n",
- fld->lcf_name, hash->fh_name);
- }
+ CDEBUG(D_INFO, "%s: Changed hash to \"%s\"\n",
+ fld->lcf_name, hash->fh_name);
+ }
- RETURN(count);
+ RETURN(count);
}
static int
*/
/** @{ */
/** Lock protecting page tree. */
- cfs_spinlock_t coh_page_guard;
- /** Lock protecting lock list. */
- cfs_spinlock_t coh_lock_guard;
+ spinlock_t coh_page_guard;
+ /** Lock protecting lock list. */
+ spinlock_t coh_lock_guard;
/** @} locks */
/** Radix tree of cl_page's, cached for this object. */
struct radix_tree_root coh_tree;
*
* \todo XXX this can be read/write lock if needed.
*/
- cfs_spinlock_t coh_attr_guard;
- /**
- * Number of objects above this one: 0 for a top-object, 1 for its
- * sub-object, etc.
- */
- unsigned coh_nesting;
+ spinlock_t coh_attr_guard;
+ /**
+ * Number of objects above this one: 0 for a top-object, 1 for its
+ * sub-object, etc.
+ */
+ unsigned coh_nesting;
};
/**
*/
const enum cl_page_state cp_state;
/** Protect to get and put page, see cl_page_put and cl_vmpage_page */
- cfs_spinlock_t cp_lock;
- /**
- * Linkage of pages within some group. Protected by
- * cl_page::cp_mutex. */
- cfs_list_t cp_batch;
- /** Mutex serializing membership of a page in a batch. */
- cfs_mutex_t cp_mutex;
+ spinlock_t cp_lock;
+ /** Linkage of pages within group. Protected by cl_page::cp_mutex. */
+ cfs_list_t cp_batch;
+ /** Mutex serializing membership of a page in a batch. */
+ struct mutex cp_mutex;
/** Linkage of pages within cl_req. */
cfs_list_t cp_flight;
/** Transfer error. */
*
* \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
*/
- cfs_mutex_t cll_guard;
+ struct mutex cll_guard;
cfs_task_t *cll_guarder;
int cll_depth;
struct dt_object *los_obj;
/* data used to generate new fids */
- cfs_mutex_t los_id_lock;
+ struct mutex los_id_lock;
__u64 los_seq;
__u32 los_last_oid;
};
struct cl_client_cache {
cfs_atomic_t ccc_users; /* # of users (OSCs) of this data */
cfs_list_t ccc_lru; /* LRU list of cached clean pages */
- cfs_spinlock_t ccc_lru_lock; /* lock for list */
+ spinlock_t ccc_lru_lock; /* lock for list */
cfs_atomic_t ccc_lru_left; /* # of LRU entries available */
unsigned long ccc_lru_max; /* Max # of LRU entries possible */
unsigned int ccc_lru_shrinkers; /* # of threads reclaiming */
static __inline__ int ext2_set_bit(int nr, void *addr)
{
#ifdef __BIG_ENDIAN
- return cfs_set_bit((nr ^ ((BITS_PER_LONG-1) & ~0x7)), addr);
+ return set_bit((nr ^ ((BITS_PER_LONG-1) & ~0x7)), addr);
#else
- return cfs_set_bit(nr, addr);
+ return set_bit(nr, addr);
#endif
}
-static __inline__ int ext2_clear_bit(int nr, void *addr)
+static inline int ext2_clear_bit(int nr, void *addr)
{
#ifdef __BIG_ENDIAN
- return cfs_clear_bit((nr ^ ((BITS_PER_LONG-1) & ~0x7)), addr);
+ return clear_bit((nr ^ ((BITS_PER_LONG-1) & ~0x7)), addr);
#else
- return cfs_clear_bit(nr, addr);
+ return clear_bit(nr, addr);
#endif
}
__const__ unsigned char *tmp = (__const__ unsigned char *) addr;
return (tmp[nr >> 3] >> (nr & 7)) & 1;
#else
- return cfs_test_bit(nr, addr);
+ return test_bit(nr, addr);
#endif
}
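/*
 * Worked example of the big-endian remap above (standalone user-space
 * sketch, BITS_PER_LONG assumed to be 64): nr ^ ((BITS_PER_LONG-1) & ~0x7)
 * is nr ^ 56, which complements the 3-bit byte index inside the long
 * (byte k becomes byte 7 - k) while leaving the bit-within-byte and the
 * long index untouched, preserving the ext2 little-endian bitmap layout.
 */
#include <assert.h>

#define BITS_PER_LONG 64	/* assumption for this example */

static int ext2_bit_to_native(int nr)
{
	return nr ^ ((BITS_PER_LONG - 1) & ~0x7);	/* nr ^ 56 */
}

int main(void)
{
	/* bit 9 = byte 1, bit 1  ->  byte 6, bit 1 = native bit 49 */
	assert(ext2_bit_to_native(9) == 49);
	/* bits in the second long stay in the second long: 64+9 -> 64+49 */
	assert(ext2_bit_to_native(64 + 9) == 64 + 49);
	return 0;
}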
#include <linux/lustre_patchless_compat.h>
#ifdef HAVE_FS_STRUCT_RWLOCK
-# define LOCK_FS_STRUCT(fs) cfs_write_lock(&(fs)->lock)
-# define UNLOCK_FS_STRUCT(fs) cfs_write_unlock(&(fs)->lock)
+# define LOCK_FS_STRUCT(fs) write_lock(&(fs)->lock)
+# define UNLOCK_FS_STRUCT(fs) write_unlock(&(fs)->lock)
#else
-# define LOCK_FS_STRUCT(fs) cfs_spin_lock(&(fs)->lock)
-# define UNLOCK_FS_STRUCT(fs) cfs_spin_unlock(&(fs)->lock)
+# define LOCK_FS_STRUCT(fs) spin_lock(&(fs)->lock)
+# define UNLOCK_FS_STRUCT(fs) spin_unlock(&(fs)->lock)
#endif
#ifdef HAVE_FS_STRUCT_USE_PATH
#endif
#ifdef HAVE_RW_TREE_LOCK
-#define TREE_READ_LOCK_IRQ(mapping) read_lock_irq(&(mapping)->tree_lock)
-#define TREE_READ_UNLOCK_IRQ(mapping) read_unlock_irq(&(mapping)->tree_lock)
+#define TREE_READ_LOCK_IRQ(mapping) read_lock_irq(&(mapping)->tree_lock)
+#define TREE_READ_UNLOCK_IRQ(mapping) read_unlock_irq(&(mapping)->tree_lock)
#else
-#define TREE_READ_LOCK_IRQ(mapping) cfs_spin_lock_irq(&(mapping)->tree_lock)
-#define TREE_READ_UNLOCK_IRQ(mapping) cfs_spin_unlock_irq(&(mapping)->tree_lock)
+#define TREE_READ_LOCK_IRQ(mapping) spin_lock_irq(&(mapping)->tree_lock)
+#define TREE_READ_UNLOCK_IRQ(mapping) spin_unlock_irq(&(mapping)->tree_lock)
#endif
#ifdef HAVE_UNREGISTER_BLKDEV_RETURN_INT
int (* fs_map_inode_pages)(struct inode *inode, struct page **page,
int pages, unsigned long *blocks,
int *created, int create,
- cfs_mutex_t *sem);
+ struct mutex *sem);
int (* fs_write_record)(struct file *, void *, int size, loff_t *,
int force_sync);
int (* fs_read_record)(struct file *, void *, int size, loff_t *);
struct inode *inode,
struct page **page, int pages,
unsigned long *blocks, int *created,
- int create, cfs_mutex_t *mutex)
+ int create, struct mutex *mutex)
{
return obd->obd_fsops->fs_map_inode_pages(inode, page, pages, blocks,
created, create, mutex);
/* XXX copy & paste from 2.6.15 kernel */
static inline void ll_remove_from_page_cache(struct page *page)
{
- struct address_space *mapping = page->mapping;
+ struct address_space *mapping = page->mapping;
- BUG_ON(!PageLocked(page));
+ BUG_ON(!PageLocked(page));
#ifdef HAVE_RW_TREE_LOCK
- write_lock_irq(&mapping->tree_lock);
+ write_lock_irq(&mapping->tree_lock);
#else
spin_lock_irq(&mapping->tree_lock);
#endif
- radix_tree_delete(&mapping->page_tree, page->index);
- page->mapping = NULL;
- mapping->nrpages--;
- __dec_zone_page_state(page, NR_FILE_PAGES);
+ radix_tree_delete(&mapping->page_tree, page->index);
+ page->mapping = NULL;
+ mapping->nrpages--;
+ __dec_zone_page_state(page, NR_FILE_PAGES);
#ifdef HAVE_RW_TREE_LOCK
- write_unlock_irq(&mapping->tree_lock);
+ write_unlock_irq(&mapping->tree_lock);
#else
spin_unlock_irq(&mapping->tree_lock);
#endif
#define CLIENT_OBD_LIST_LOCK_DEBUG 1
typedef struct {
- cfs_spinlock_t lock;
+ spinlock_t lock;
#ifdef CLIENT_OBD_LIST_LOCK_DEBUG
- unsigned long time;
- struct task_struct *task;
- const char *func;
- int line;
+ unsigned long time;
+ struct task_struct *task;
+ const char *func;
+ int line;
#endif
-
} client_obd_lock_t;
#ifdef CLIENT_OBD_LIST_LOCK_DEBUG
const char *func,
int line)
{
- unsigned long cur = jiffies;
- while (1) {
- if (cfs_spin_trylock(&lock->lock)) {
+ unsigned long cur = jiffies;
+ while (1) {
+ if (spin_trylock(&lock->lock)) {
LASSERT(lock->task == NULL);
lock->task = current;
lock->func = func;
static inline void client_obd_list_unlock(client_obd_lock_t *lock)
{
- LASSERT(lock->task != NULL);
- lock->task = NULL;
- lock->time = jiffies;
- cfs_spin_unlock(&lock->lock);
+ LASSERT(lock->task != NULL);
+ lock->task = NULL;
+ lock->time = jiffies;
+ spin_unlock(&lock->lock);
}
#else /* ifdef CLIENT_OBD_LIST_LOCK_DEBUG */
static inline void client_obd_list_lock(client_obd_lock_t *lock)
{
- cfs_spin_lock(&lock->lock);
+ spin_lock(&lock->lock);
}
static inline void client_obd_list_unlock(client_obd_lock_t *lock)
{
- cfs_spin_unlock(&lock->lock);
+ spin_unlock(&lock->lock);
}
#endif /* ifdef CLIENT_OBD_LIST_LOCK_DEBUG */
static inline void client_obd_list_lock_init(client_obd_lock_t *lock)
{
- cfs_spin_lock_init(&lock->lock);
+ spin_lock_init(&lock->lock);
}
static inline void client_obd_list_lock_done(client_obd_lock_t *lock)
/* if we find more consumers this could be generalized */
#define OBD_HIST_MAX 32
struct obd_histogram {
- cfs_spinlock_t oh_lock;
- unsigned long oh_buckets[OBD_HIST_MAX];
+ spinlock_t oh_lock;
+ unsigned long oh_buckets[OBD_HIST_MAX];
};
enum {
};
struct lprocfs_stats {
- unsigned short ls_num; /* # of counters */
- unsigned short ls_biggest_alloc_num;
- /* 1 + the highest slot index which has
- * been allocated, the 0th entry is
- * a statically intialized template */
- int ls_flags; /* See LPROCFS_STATS_FLAG_* */
+ unsigned short ls_num; /* # of counters */
+ unsigned short ls_biggest_alloc_num;
+ /* 1 + the highest slot index which has
+ * been allocated, the 0th entry is
+ * a statically initialized template */
+ int ls_flags; /* See LPROCFS_STATS_FLAG_* */
/* Lock used when there are no percpu stats areas; For percpu stats,
* it is used to protect ls_biggest_alloc_num change */
- cfs_spinlock_t ls_lock;
- struct lprocfs_percpu *ls_percpu[0];
+ spinlock_t ls_lock;
+ struct lprocfs_percpu *ls_percpu[0];
};
#define OPC_RANGE(seg) (seg ## _LAST_OPC - seg ## _FIRST_OPC)
struct obd_job_stats {
cfs_hash_t *ojs_hash;
cfs_list_t ojs_list;
- cfs_rwlock_t ojs_lock; /* protect the obj_list */
+ rwlock_t ojs_lock; /* protect the obj_list */
cntr_init_callback ojs_cntr_init_fn;
int ojs_cntr_num;
int ojs_cleanup_interval;
/* non-percpu counter stats */
if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
- cfs_spin_lock_irqsave(&stats->ls_lock, *flags);
+ spin_lock_irqsave(&stats->ls_lock, *flags);
else
- cfs_spin_lock(&stats->ls_lock);
+ spin_lock(&stats->ls_lock);
return 0;
case LPROCFS_GET_NUM_CPU:
/* non-percpu counter stats */
if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
- cfs_spin_lock_irqsave(&stats->ls_lock, *flags);
+ spin_lock_irqsave(&stats->ls_lock, *flags);
else
- cfs_spin_lock(&stats->ls_lock);
+ spin_lock(&stats->ls_lock);
return 1;
}
}
case LPROCFS_GET_SMP_ID:
if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
- cfs_spin_unlock_irqrestore(&stats->ls_lock,
+ spin_unlock_irqrestore(&stats->ls_lock,
*flags);
} else {
- cfs_spin_unlock(&stats->ls_lock);
+ spin_unlock(&stats->ls_lock);
}
} else {
cfs_put_cpu();
case LPROCFS_GET_NUM_CPU:
if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
- cfs_spin_unlock_irqrestore(&stats->ls_lock,
+ spin_unlock_irqrestore(&stats->ls_lock,
*flags);
} else {
- cfs_spin_unlock(&stats->ls_lock);
+ spin_unlock(&stats->ls_lock);
}
}
return;
* the import in a client obd_device for a lprocfs entry */
#define LPROCFS_CLIMP_CHECK(obd) do { \
typecheck(struct obd_device *, obd); \
- cfs_down_read(&(obd)->u.cli.cl_sem); \
+ down_read(&(obd)->u.cli.cl_sem); \
if ((obd)->u.cli.cl_import == NULL) { \
- cfs_up_read(&(obd)->u.cli.cl_sem); \
+ up_read(&(obd)->u.cli.cl_sem); \
return -ENODEV; \
} \
} while(0)
#define LPROCFS_CLIMP_EXIT(obd) \
- cfs_up_read(&(obd)->u.cli.cl_sem);
+ up_read(&(obd)->u.cli.cl_sem);
/* write the name##_seq_show function, call LPROC_SEQ_FOPS_RO for read-only
* by ls_ld_lock.
**/
cfs_list_t ls_ld_linkage;
- cfs_spinlock_t ls_ld_lock;
+ spinlock_t ls_ld_lock;
- /**
- * lu_site stats
- */
- struct lprocfs_stats *ls_stats;
- struct lprocfs_stats *ls_time_stats;
+ /**
+ * lu_site stats
+ */
+ struct lprocfs_stats *ls_stats;
+ struct lprocfs_stats *ls_time_stats;
/**
* XXX: a hack! fld has to find md_site via site, remove when possible
*/
- struct md_site *ld_md_site;
+ struct md_site *ld_md_site;
};
static inline struct lu_site_bkt_data *
*/
static inline int lu_object_is_dying(const struct lu_object_header *h)
{
- return cfs_test_bit(LU_OBJECT_HEARD_BANSHEE, &h->loh_flags);
+ return test_bit(LU_OBJECT_HEARD_BANSHEE, &h->loh_flags);
}
void lu_object_put(const struct lu_env *env, struct lu_object *o);
* etc.) refer to.
*/
struct lu_ref {
- /**
- * Spin-lock protecting lu_ref::lf_list.
- */
- cfs_spinlock_t lf_guard;
+ /**
+ * Spin-lock protecting lu_ref::lf_list.
+ */
+ spinlock_t lf_guard;
/**
* List of all outstanding references (each represented by struct
* lu_ref_link), pointing to this object.
/** Server last transaction number */
__u64 lut_last_transno;
/** Lock protecting last transaction number */
- cfs_spinlock_t lut_translock;
- /** Lock protecting client bitmap */
- cfs_spinlock_t lut_client_bitmap_lock;
- /** Bitmap of known clients */
- unsigned long *lut_client_bitmap;
+ spinlock_t lut_translock;
+ /** Lock protecting client bitmap */
+ spinlock_t lut_client_bitmap_lock;
+ /** Bitmap of known clients */
+ unsigned long *lut_client_bitmap;
};
typedef void (*tgt_cb_t)(struct lu_target *lut, __u64 transno,
struct lustre_capa c_capa; /* capa */
cfs_atomic_t c_refc; /* ref count */
cfs_time_t c_expiry; /* jiffies */
- cfs_spinlock_t c_lock; /* protect capa content */
- int c_site;
+ spinlock_t c_lock; /* protect capa content */
+ int c_site;
- union {
- struct client_capa cli;
- struct target_capa tgt;
- } u;
+ union {
+ struct client_capa cli;
+ struct target_capa tgt;
+ } u;
};
enum {
/* obdclass/capa.c */
extern cfs_list_t capa_list[];
-extern cfs_spinlock_t capa_lock;
+extern spinlock_t capa_lock;
extern int capa_count[];
extern cfs_mem_cache_t *capa_cachep;
CFS_INIT_LIST_HEAD(&ocapa->c_list);
cfs_atomic_set(&ocapa->c_refc, 1);
- cfs_spin_lock_init(&ocapa->c_lock);
+ spin_lock_init(&ocapa->c_lock);
ocapa->c_site = site;
if (ocapa->c_site == CAPA_SITE_CLIENT)
CFS_INIT_LIST_HEAD(&ocapa->u.cli.lli_list);
/**
* Lock for protecting slv/clv updates.
*/
- cfs_spinlock_t pl_lock;
+ spinlock_t pl_lock;
/**
* Number of allowed locks in pool, both client and server side.
*/
/**
* serialize
*/
- cfs_spinlock_t ns_lock;
+ spinlock_t ns_lock;
/**
* big refcount (by bucket)
* Internal spinlock protects l_resource. we should hold this lock
* first before grabbing res_lock.
*/
- cfs_spinlock_t l_lock;
+ spinlock_t l_lock;
/**
* ldlm_lock_change_resource() can change this.
*/
};
struct ldlm_resource {
- struct ldlm_ns_bucket *lr_ns_bucket;
+ struct ldlm_ns_bucket *lr_ns_bucket;
- /* protected by ns_hash_lock */
- cfs_hlist_node_t lr_hash;
- cfs_spinlock_t lr_lock;
+ /* protected by ns_hash_lock */
+ cfs_hlist_node_t lr_hash;
+ spinlock_t lr_lock;
/* protected by lr_lock */
cfs_list_t lr_granted;
/* Server-side-only lock value block elements */
/** to serialize lvbo_init */
- cfs_mutex_t lr_lvb_mutex;
+ struct mutex lr_lvb_mutex;
__u32 lr_lvb_len;
/** protect by lr_lock */
void *lr_lvb_data;
static inline void lock_res(struct ldlm_resource *res)
{
- cfs_spin_lock(&res->lr_lock);
+ spin_lock(&res->lr_lock);
}
static inline void lock_res_nested(struct ldlm_resource *res,
enum lock_res_type mode)
{
- cfs_spin_lock_nested(&res->lr_lock, mode);
+ spin_lock_nested(&res->lr_lock, mode);
}
static inline void unlock_res(struct ldlm_resource *res)
{
- cfs_spin_unlock(&res->lr_lock);
+ spin_unlock(&res->lr_lock);
}
static inline void check_res_locked(struct ldlm_resource *res)
* Target-specific export data
*/
struct tg_export_data {
- /** Protects led_lcd below */
- cfs_mutex_t ted_lcd_lock;
- /** Per-client data for each export */
- struct lsd_client_data *ted_lcd;
- /** Offset of record in last_rcvd file */
- loff_t ted_lr_off;
- /** Client index in last_rcvd file */
- int ted_lr_idx;
+ /** Protects led_lcd below */
+ struct mutex ted_lcd_lock;
+ /** Per-client data for each export */
+ struct lsd_client_data *ted_lcd;
+ /** Offset of record in last_rcvd file */
+ loff_t ted_lr_off;
+ /** Client index in last_rcvd file */
+ int ted_lr_idx;
};
/**
* MDT-specific export data
*/
struct mdt_export_data {
- struct tg_export_data med_ted;
- /** List of all files opened by client on this MDT */
- cfs_list_t med_open_head;
- cfs_spinlock_t med_open_lock; /* lock med_open_head, mfd_list*/
- /** Bitmask of all ibit locks this MDT understands */
- __u64 med_ibits_known;
- cfs_mutex_t med_idmap_mutex;
- struct lustre_idmap_table *med_idmap;
+ struct tg_export_data med_ted;
+ /** List of all files opened by client on this MDT */
+ cfs_list_t med_open_head;
+ spinlock_t med_open_lock; /* med_open_head, mfd_list */
+ /** Bitmask of all ibit locks this MDT understands */
+ __u64 med_ibits_known;
+ struct mutex med_idmap_mutex;
+ struct lustre_idmap_table *med_idmap;
};
struct ec_export_data { /* echo client */
/* In-memory access to client data from OST struct */
/** Filter (oss-side) specific import data */
struct filter_export_data {
- struct tg_export_data fed_ted;
- cfs_spinlock_t fed_lock; /**< protects fed_mod_list */
+ struct tg_export_data fed_ted;
+ spinlock_t fed_lock; /**< protects fed_mod_list */
long fed_dirty; /* in bytes */
long fed_grant; /* in bytes */
cfs_list_t fed_mod_list; /* files being modified */
};
struct mgs_export_data {
- cfs_list_t med_clients; /* mgc fs client via this exp */
- cfs_spinlock_t med_lock; /* protect med_clients */
+ cfs_list_t med_clients; /* mgc fs client via this exp */
+ spinlock_t med_lock; /* protect med_clients */
};
/**
cfs_atomic_t exp_locks_count; /** Lock references */
#if LUSTRE_TRACKS_LOCK_EXP_REFS
cfs_list_t exp_locks_list;
- cfs_spinlock_t exp_locks_list_guard;
+ spinlock_t exp_locks_list_guard;
#endif
/** UUID of client connected to this export */
struct obd_uuid exp_client_uuid;
cfs_hash_t *exp_flock_hash;
cfs_list_t exp_outstanding_replies;
cfs_list_t exp_uncommitted_replies;
- cfs_spinlock_t exp_uncommitted_replies_lock;
+ spinlock_t exp_uncommitted_replies_lock;
/** Last committed transno for this export */
__u64 exp_last_committed;
/** When was last request received */
* protects exp_flags, exp_outstanding_replies and the change
* of exp_imp_reverse
*/
- cfs_spinlock_t exp_lock;
+ spinlock_t exp_lock;
/** Compatibility flags for this export */
__u64 exp_connect_flags;
enum obd_option exp_flags;
cfs_time_t exp_flvr_expire[2]; /* seconds */
/** protects exp_hp_rpcs */
- cfs_spinlock_t exp_rpc_lock;
- cfs_list_t exp_hp_rpcs; /* (potential) HP RPCs */
+ spinlock_t exp_rpc_lock;
+ cfs_list_t exp_hp_rpcs; /* (potential) HP RPCs */
/** blocking dlm lock list, protected by exp_bl_list_lock */
cfs_list_t exp_bl_list;
- cfs_spinlock_t exp_bl_list_lock;
+ spinlock_t exp_bl_list_lock;
/** Target specific data */
union {
struct lu_client_seq {
/* Sequence-controller export. */
struct obd_export *lcs_exp;
- cfs_mutex_t lcs_mutex;
+ struct mutex lcs_mutex;
/*
* Range of sequences allowed for allocation. When using lu_client_seq on
struct lu_client_seq *lss_cli;
/* Mutex for protecting allocation */
- cfs_mutex_t lss_mutex;
+ struct mutex lss_mutex;
/*
* Service uuid, passed from MDT + seq name to form unique seq name to
/**
* Protect index modifications */
- cfs_mutex_t lsf_lock;
+ struct mutex lsf_lock;
/**
* Fld service name in form "fld-srv-lustre-MDTXXX" */
/**
* Lock protecting exports list and fld_hash. */
- cfs_spinlock_t lcf_lock;
+ spinlock_t lcf_lock;
/**
* Client FLD cache. */
/* newly added fields to handle the RCU issue. -jxiong */
cfs_rcu_head_t h_rcu;
- cfs_spinlock_t h_lock;
+ spinlock_t h_lock;
unsigned int h_size:31;
unsigned int h_in:1;
};
};
struct lustre_idmap_table {
- cfs_spinlock_t lit_lock;
- cfs_list_t lit_idmaps[CFS_IDMAP_N_HASHES][CFS_IDMAP_HASHSIZE];
+ spinlock_t lit_lock;
+ cfs_list_t lit_idmaps[CFS_IDMAP_N_HASHES][CFS_IDMAP_HASHSIZE];
};
extern void lustre_groups_from_list(cfs_group_info_t *ginfo, gid_t *glist);
#define AT_FLG_NOHIST 0x1 /* use last reported value only */
struct adaptive_timeout {
- time_t at_binstart; /* bin start time */
- unsigned int at_hist[AT_BINS]; /* timeout history bins */
- unsigned int at_flags;
- unsigned int at_current; /* current timeout value */
- unsigned int at_worst_ever; /* worst-ever timeout value */
- time_t at_worst_time; /* worst-ever timeout timestamp */
- cfs_spinlock_t at_lock;
+ time_t at_binstart; /* bin start time */
+ unsigned int at_hist[AT_BINS]; /* timeout history bins */
+ unsigned int at_flags;
+ unsigned int at_current; /* current timeout value */
+ unsigned int at_worst_ever; /* worst-ever timeout value */
+ time_t at_worst_time; /* worst-ever timeout timestamp */
+ spinlock_t at_lock;
};
struct ptlrpc_at_array {
* @{
*/
struct ptlrpc_sec *imp_sec;
- cfs_mutex_t imp_sec_mutex;
+ struct mutex imp_sec_mutex;
cfs_time_t imp_sec_expire;
/** @} */
struct obd_import_conn *imp_conn_current;
/** Protects flags, level, generation, conn_cnt, *_list */
- cfs_spinlock_t imp_lock;
+ spinlock_t imp_lock;
/* flags */
unsigned long imp_no_timeout:1, /* timeouts are disabled */
}
static inline void at_init(struct adaptive_timeout *at, int val, int flags) {
memset(at, 0, sizeof(*at));
- cfs_spin_lock_init(&at->at_lock);
+ spin_lock_init(&at->at_lock);
at->at_flags = flags;
at_reset(at, val);
}
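/*
 * Illustrative sketch, not part of the patch: the bare kernel spinlock API
 * that the cfs_spin_lock_init()/cfs_spin_lock()/cfs_spin_unlock() wrappers
 * are being mapped to.  at_set_current() is a hypothetical helper, shown
 * only to demonstrate the lock/unlock pairing around fields guarded by
 * adaptive_timeout::at_lock.
 */
#include <linux/spinlock.h>

static void at_set_current(struct adaptive_timeout *at, unsigned int val)
{
        spin_lock(&at->at_lock);
        at->at_current = val;
        if (val > at->at_worst_ever)
                at->at_worst_ever = val;
        spin_unlock(&at->at_lock);
}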
/* l_lock.c */
struct lustre_lock {
- int l_depth;
- cfs_task_t *l_owner;
- cfs_semaphore_t l_sem;
- cfs_spinlock_t l_spin;
+ int l_depth;
+ cfs_task_t *l_owner;
+ struct semaphore l_sem;
+ spinlock_t l_spin;
};
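/*
 * Illustrative sketch, not part of the patch: the native struct semaphore
 * that cfs_semaphore_t and cfs_sema_init() are being converted to, together
 * with the plain kernel down()/up() primitives.  lustre_lock_demo() is
 * hypothetical and only shows the init/down/up pairing on lustre_lock::l_sem.
 */
#include <linux/semaphore.h>

static void lustre_lock_demo(struct lustre_lock *lck)
{
        sema_init(&lck->l_sem, 1);      /* binary semaphore */
        down(&lck->l_sem);              /* may sleep */
        lck->l_depth = 0;
        up(&lck->l_sem);
}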
void l_lock_init(struct lustre_lock *);
* under ->lco_lock.
*/
__u64 lco_flags;
- cfs_mutex_t lco_lock;
+ struct mutex lco_lock;
struct obd_export *lco_md_exp;
struct obd_export *lco_dt_exp;
};
/* In-memory descriptor for a log object or log catalog */
struct llog_handle {
- cfs_rw_semaphore_t lgh_lock;
- cfs_spinlock_t lgh_hdr_lock; /* protect lgh_hdr data */
+ struct rw_semaphore lgh_lock;
+ spinlock_t lgh_hdr_lock; /* protect lgh_hdr data */
struct llog_logid lgh_id; /* id of this log */
struct llog_log_hdr *lgh_hdr;
struct file *lgh_file;
struct llog_handle *loc_handle;
struct llog_commit_master *loc_lcm;
struct llog_canceld_ctxt *loc_llcd;
- cfs_mutex_t loc_mutex; /* protects loc_llcd and loc_imp */
+ struct mutex loc_mutex; /* protect loc_llcd and loc_imp */
cfs_atomic_t loc_refcount;
void *llog_proc_cb;
long loc_flags; /* flags, see above defines */
/**
* Lock protecting list of llcds.
*/
- cfs_spinlock_t lcm_lock;
+ spinlock_t lcm_lock;
/**
* Llcds in flight for debugging purposes.
*/
static inline void llog_group_init(struct obd_llog_group *olg, int group)
{
- cfs_waitq_init(&olg->olg_waitq);
- cfs_spin_lock_init(&olg->olg_lock);
- cfs_mutex_init(&olg->olg_cat_processing);
- olg->olg_seq = group;
+ cfs_waitq_init(&olg->olg_waitq);
+ spin_lock_init(&olg->olg_lock);
+ mutex_init(&olg->olg_cat_processing);
+ olg->olg_seq = group;
}
static inline void llog_group_set_export(struct obd_llog_group *olg,
struct obd_export *exp)
{
- LASSERT(exp != NULL);
-
- cfs_spin_lock(&olg->olg_lock);
- if (olg->olg_exp != NULL && olg->olg_exp != exp)
- CWARN("%s: export for group %d is changed: 0x%p -> 0x%p\n",
- exp->exp_obd->obd_name, olg->olg_seq,
- olg->olg_exp, exp);
- olg->olg_exp = exp;
- cfs_spin_unlock(&olg->olg_lock);
+ LASSERT(exp != NULL);
+
+ spin_lock(&olg->olg_lock);
+ if (olg->olg_exp != NULL && olg->olg_exp != exp)
+ CWARN("%s: export for group %d is changed: 0x%p -> 0x%p\n",
+ exp->exp_obd->obd_name, olg->olg_seq,
+ olg->olg_exp, exp);
+ olg->olg_exp = exp;
+ spin_unlock(&olg->olg_lock);
}
static inline int llog_group_set_ctxt(struct obd_llog_group *olg,
struct llog_ctxt *ctxt, int index)
{
- LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
-
- cfs_spin_lock(&olg->olg_lock);
- if (olg->olg_ctxts[index] != NULL) {
- cfs_spin_unlock(&olg->olg_lock);
- return -EEXIST;
- }
- olg->olg_ctxts[index] = ctxt;
- cfs_spin_unlock(&olg->olg_lock);
- return 0;
+ LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
+
+ spin_lock(&olg->olg_lock);
+ if (olg->olg_ctxts[index] != NULL) {
+ spin_unlock(&olg->olg_lock);
+ return -EEXIST;
+ }
+ olg->olg_ctxts[index] = ctxt;
+ spin_unlock(&olg->olg_lock);
+ return 0;
}
static inline struct llog_ctxt *llog_group_get_ctxt(struct obd_llog_group *olg,
int index)
{
- struct llog_ctxt *ctxt;
+ struct llog_ctxt *ctxt;
- LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
+ LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
- cfs_spin_lock(&olg->olg_lock);
- if (olg->olg_ctxts[index] == NULL) {
- ctxt = NULL;
- } else {
- ctxt = llog_ctxt_get(olg->olg_ctxts[index]);
- }
- cfs_spin_unlock(&olg->olg_lock);
- return ctxt;
+ spin_lock(&olg->olg_lock);
+ if (olg->olg_ctxts[index] == NULL)
+ ctxt = NULL;
+ else
+ ctxt = llog_ctxt_get(olg->olg_ctxts[index]);
+ spin_unlock(&olg->olg_lock);
+ return ctxt;
}
static inline void llog_group_clear_ctxt(struct obd_llog_group *olg, int index)
{
LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
- cfs_spin_lock(&olg->olg_lock);
+ spin_lock(&olg->olg_lock);
olg->olg_ctxts[index] = NULL;
- cfs_spin_unlock(&olg->olg_lock);
+ spin_unlock(&olg->olg_lock);
}
static inline struct llog_ctxt *llog_get_context(struct obd_device *obd,
struct obd_device;
struct mdc_rpc_lock {
- cfs_mutex_t rpcl_mutex;
+ struct mutex rpcl_mutex;
struct lookup_intent *rpcl_it;
int rpcl_fakes;
};
static inline void mdc_init_rpc_lock(struct mdc_rpc_lock *lck)
{
- cfs_mutex_init(&lck->rpcl_mutex);
+ mutex_init(&lck->rpcl_mutex);
lck->rpcl_it = NULL;
}
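/*
 * Illustrative sketch, not part of the patch: the two mutex initialisation
 * styles seen in the converted code - mutex_init() for a mutex embedded in
 * a structure (as in mdc_init_rpc_lock() above) and DEFINE_MUTEX() for a
 * file-scope mutex.  demo_guard and demo_touch() are hypothetical.
 */
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_guard);

static void demo_touch(struct mdc_rpc_lock *lck)
{
        mutex_lock(&lck->rpcl_mutex);
        lck->rpcl_fakes = 0;            /* field guarded by rpcl_mutex */
        mutex_unlock(&lck->rpcl_mutex);

        mutex_lock(&demo_guard);        /* statically initialised mutex */
        mutex_unlock(&demo_guard);
}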
* Only when all fake requests are finished can normal requests
* be sent, to ensure they are recoverable again. */
again:
- cfs_mutex_lock(&lck->rpcl_mutex);
+ mutex_lock(&lck->rpcl_mutex);
if (CFS_FAIL_CHECK_QUIET(OBD_FAIL_MDC_RPCS_SEM)) {
lck->rpcl_it = MDC_FAKE_RPCL_IT;
lck->rpcl_fakes++;
- cfs_mutex_unlock(&lck->rpcl_mutex);
+ mutex_unlock(&lck->rpcl_mutex);
return;
}
* in this extremely rare case, just have low overhead in
* the common case when it isn't true. */
while (unlikely(lck->rpcl_it == MDC_FAKE_RPCL_IT)) {
- cfs_mutex_unlock(&lck->rpcl_mutex);
+ mutex_unlock(&lck->rpcl_mutex);
cfs_schedule_timeout(cfs_time_seconds(1) / 4);
goto again;
}
goto out;
if (lck->rpcl_it == MDC_FAKE_RPCL_IT) { /* OBD_FAIL_MDC_RPCS_SEM */
- cfs_mutex_lock(&lck->rpcl_mutex);
+ mutex_lock(&lck->rpcl_mutex);
LASSERTF(lck->rpcl_fakes > 0, "%d\n", lck->rpcl_fakes);
lck->rpcl_fakes--;
lck->rpcl_it = NULL;
}
- cfs_mutex_unlock(&lck->rpcl_mutex);
+ mutex_unlock(&lck->rpcl_mutex);
out:
EXIT;
}
* locked so that any old caller can communicate requests to
* the set holder who can then fold them into the lock-free set
*/
- cfs_spinlock_t set_new_req_lock;
+ spinlock_t set_new_req_lock;
/** List of new yet unsent requests. Only used with ptlrpcd now. */
cfs_list_t set_new_requests;
cfs_list_t rs_debug_list;
#endif
/** A spinlock to protect the reply state flags */
- cfs_spinlock_t rs_lock;
+ spinlock_t rs_lock;
/** Reply state flags */
unsigned long rs_difficult:1; /* ACK/commit stuff */
unsigned long rs_no_ack:1; /* no ACK, even for
* any allocations (to avoid e.g. OOM).
*/
struct ptlrpc_request_pool {
- /** Locks the list */
- cfs_spinlock_t prp_lock;
+ /** Locks the list */
+ spinlock_t prp_lock;
/** list of ptlrpc_request structs */
cfs_list_t prp_req_list;
/** Maximum message size that would fit into a request from this pool */
/** Lock to protect request flags and some other important bits, like
* rq_list
*/
- cfs_spinlock_t rq_lock;
- /** client-side flags are serialized by rq_lock */
+ spinlock_t rq_lock;
+ /** client-side flags are serialized by rq_lock */
unsigned int rq_intr:1, rq_replied:1, rq_err:1,
rq_timedout:1, rq_resend:1, rq_restart:1,
/**
/** client side */
unsigned long bd_registered:1;
/** For serialization with callback */
- cfs_spinlock_t bd_lock;
+ spinlock_t bd_lock;
/** Import generation when request for this bulk was sent */
int bd_import_generation;
/** Server side - export this bulk created for */
*/
struct ptlrpc_service {
/** serialize /proc operations */
- cfs_spinlock_t srv_lock;
+ spinlock_t srv_lock;
/** most often accessed fields */
/** chain thru all services */
cfs_list_t srv_list;
* rqbd list and incoming requests waiting for preprocess,
* threads starting & stopping are also protected by this lock.
*/
- cfs_spinlock_t scp_lock __cfs_cacheline_aligned;
+ spinlock_t scp_lock __cfs_cacheline_aligned;
/** total # req buffer descs allocated */
int scp_nrqbds_total;
/** # posted request buffers for receiving */
* serialize the following fields, used for processing requests
* sent to this portal
*/
- cfs_spinlock_t scp_req_lock __cfs_cacheline_aligned;
+ spinlock_t scp_req_lock __cfs_cacheline_aligned;
/** # reqs in either of the queues below */
/** reqs waiting for service */
cfs_list_t scp_req_pending;
* serialize the following fields, used for changes on
* adaptive timeout
*/
- cfs_spinlock_t scp_at_lock __cfs_cacheline_aligned;
+ spinlock_t scp_at_lock __cfs_cacheline_aligned;
/** estimated rpc service time */
struct adaptive_timeout scp_at_estimate;
/** reqs waiting for replies */
* serialize the following fields, used for processing
* replies for this portal
*/
- cfs_spinlock_t scp_rep_lock __cfs_cacheline_aligned;
+ spinlock_t scp_rep_lock __cfs_cacheline_aligned;
/** all the active replies */
cfs_list_t scp_rep_active;
#ifndef __KERNEL__
* Declaration of ptlrpcd control structure
*/
struct ptlrpcd_ctl {
- /**
- * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
- */
- unsigned long pc_flags;
- /**
- * Thread lock protecting structure fields.
- */
- cfs_spinlock_t pc_lock;
- /**
- * Start completion.
- */
- cfs_completion_t pc_starting;
- /**
- * Stop completion.
- */
- cfs_completion_t pc_finishing;
+ /**
+ * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
+ */
+ unsigned long pc_flags;
+ /**
+ * Thread lock protecting structure fields.
+ */
+ spinlock_t pc_lock;
+ /**
+ * Start completion.
+ */
+ struct completion pc_starting;
+ /**
+ * Stop completion.
+ */
+ struct completion pc_finishing;
/**
* Thread requests set.
*/
static inline int ptlrpc_server_bulk_active(struct ptlrpc_bulk_desc *desc)
{
- int rc;
+ int rc;
- LASSERT(desc != NULL);
+ LASSERT(desc != NULL);
- cfs_spin_lock(&desc->bd_lock);
- rc = desc->bd_network_rw;
- cfs_spin_unlock(&desc->bd_lock);
- return rc;
+ spin_lock(&desc->bd_lock);
+ rc = desc->bd_network_rw;
+ spin_unlock(&desc->bd_lock);
+ return rc;
}
#endif
if (!desc)
return 0;
- cfs_spin_lock(&desc->bd_lock);
- rc = desc->bd_network_rw;
- cfs_spin_unlock(&desc->bd_lock);
- return rc;
+ spin_lock(&desc->bd_lock);
+ rc = desc->bd_network_rw;
+ spin_unlock(&desc->bd_lock);
+ return rc;
}
#define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01
static inline int
ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
{
- int rc;
-
- cfs_spin_lock(&req->rq_lock);
- if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > cfs_time_current_sec()) {
- cfs_spin_unlock(&req->rq_lock);
- return 1;
- }
- rc = req->rq_receiving_reply || req->rq_must_unlink;
- cfs_spin_unlock(&req->rq_lock);
- return rc;
+ int rc;
+
+ spin_lock(&req->rq_lock);
+ if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
+ req->rq_reply_deadline > cfs_time_current_sec()) {
+ spin_unlock(&req->rq_lock);
+ return 1;
+ }
+ rc = req->rq_receiving_reply || req->rq_must_unlink;
+ spin_unlock(&req->rq_lock);
+ return rc;
}
static inline void
static inline int ptlrpc_no_resend(struct ptlrpc_request *req)
{
- if (!req->rq_no_resend && ptlrpc_send_limit_expired(req)) {
- cfs_spin_lock(&req->rq_lock);
- req->rq_no_resend = 1;
- cfs_spin_unlock(&req->rq_lock);
- }
- return req->rq_no_resend;
+ if (!req->rq_no_resend && ptlrpc_send_limit_expired(req)) {
+ spin_lock(&req->rq_lock);
+ req->rq_no_resend = 1;
+ spin_unlock(&req->rq_lock);
+ }
+ return req->rq_no_resend;
}
static inline int
unsigned int cc_early_expire:1;
unsigned long cc_flags;
struct vfs_cred cc_vcred;
- cfs_spinlock_t cc_lock;
+ spinlock_t cc_lock;
cfs_list_t cc_req_list; /* waiting reqs linked here */
cfs_list_t cc_gc_chain; /* linked to gc chain */
};
unsigned int ps_dying:1;
/** owning import */
struct obd_import *ps_import;
- cfs_spinlock_t ps_lock;
+ spinlock_t ps_lock;
/*
* garbage collection
struct md_upcall {
/** this lock protects the upcall against its removal;
* the read lock is taken while using the upcall, the write lock for init/fini */
- cfs_rw_semaphore_t mu_upcall_sem;
+ struct rw_semaphore mu_upcall_sem;
/** device to call, upper layer normally */
struct md_device *mu_upcall_dev;
/** upcall function */
static inline void md_upcall_init(struct md_device *m, void *upcl)
{
- cfs_init_rwsem(&m->md_upcall.mu_upcall_sem);
- m->md_upcall.mu_upcall_dev = NULL;
- m->md_upcall.mu_upcall = upcl;
+ init_rwsem(&m->md_upcall.mu_upcall_sem);
+ m->md_upcall.mu_upcall_dev = NULL;
+ m->md_upcall.mu_upcall = upcl;
}
static inline void md_upcall_dev_set(struct md_device *m, struct md_device *up)
{
- cfs_down_write(&m->md_upcall.mu_upcall_sem);
- m->md_upcall.mu_upcall_dev = up;
- cfs_up_write(&m->md_upcall.mu_upcall_sem);
+ down_write(&m->md_upcall.mu_upcall_sem);
+ m->md_upcall.mu_upcall_dev = up;
+ up_write(&m->md_upcall.mu_upcall_sem);
}
static inline void md_upcall_fini(struct md_device *m)
{
- cfs_down_write(&m->md_upcall.mu_upcall_sem);
- m->md_upcall.mu_upcall_dev = NULL;
- m->md_upcall.mu_upcall = NULL;
- cfs_up_write(&m->md_upcall.mu_upcall_sem);
+ down_write(&m->md_upcall.mu_upcall_sem);
+ m->md_upcall.mu_upcall_dev = NULL;
+ m->md_upcall.mu_upcall = NULL;
+ up_write(&m->md_upcall.mu_upcall_sem);
}
static inline int md_do_upcall(const struct lu_env *env, struct md_device *m,
- enum md_upcall_event ev, void *data)
-{
- int rc = 0;
- cfs_down_read(&m->md_upcall.mu_upcall_sem);
- if (m->md_upcall.mu_upcall_dev != NULL &&
- m->md_upcall.mu_upcall_dev->md_upcall.mu_upcall != NULL) {
- rc = m->md_upcall.mu_upcall_dev->md_upcall.mu_upcall(env,
- m->md_upcall.mu_upcall_dev,
- ev, data);
- }
- cfs_up_read(&m->md_upcall.mu_upcall_sem);
- return rc;
+ enum md_upcall_event ev, void *data)
+{
+ int rc = 0;
+ down_read(&m->md_upcall.mu_upcall_sem);
+ if (m->md_upcall.mu_upcall_dev != NULL &&
+ m->md_upcall.mu_upcall_dev->md_upcall.mu_upcall != NULL) {
+ rc = m->md_upcall.mu_upcall_dev->md_upcall.mu_upcall(env,
+ m->md_upcall.mu_upcall_dev,
+ ev, data);
+ }
+ up_read(&m->md_upcall.mu_upcall_sem);
+ return rc;
}
struct md_object {
struct lov_stripe_md {
cfs_atomic_t lsm_refc;
- cfs_spinlock_t lsm_lock;
+ spinlock_t lsm_lock;
pid_t lsm_lock_owner; /* debugging */
/* maximum possible file size, might change as OSTs status changes,
char *typ_name;
int typ_refcnt;
struct lu_device_type *typ_lu;
- cfs_spinlock_t obd_type_lock;
+ spinlock_t obd_type_lock;
};
struct brw_page {
struct lu_target *obt_lut;
#endif
__u64 obt_mount_count;
- cfs_rw_semaphore_t obt_rwsem;
+ struct rw_semaphore obt_rwsem;
struct vfsmount *obt_vfsmnt;
struct file *obt_health_check_filp;
struct osd_properties obt_osd_properties;
cfs_dentry_t *fo_dentry_O;
cfs_dentry_t **fo_dentry_O_groups;
struct filter_subdirs *fo_dentry_O_sub;
- cfs_mutex_t fo_init_lock; /* group initialization lock */
- int fo_committed_group;
+ struct mutex fo_init_lock; /* group initialization lock*/
+ int fo_committed_group;
- cfs_spinlock_t fo_objidlock; /* protect fo_lastobjid */
+ spinlock_t fo_objidlock; /* protect fo_lastobjid */
- unsigned long fo_destroys_in_progress;
- cfs_mutex_t fo_create_locks[FILTER_SUBDIR_COUNT];
+ unsigned long fo_destroys_in_progress;
+ struct mutex fo_create_locks[FILTER_SUBDIR_COUNT];
cfs_list_t fo_export_list;
int fo_subdir_count;
int fo_tot_granted_clients;
obd_size fo_readcache_max_filesize;
- cfs_spinlock_t fo_flags_lock;
+ spinlock_t fo_flags_lock;
unsigned int fo_read_cache:1, /**< enable read-only cache */
fo_writethrough_cache:1,/**< read cache writes */
fo_mds_ost_sync:1, /**< MDS-OST orphan recovery*/
__u64 *fo_last_objids; /* last created objid for groups,
* protected by fo_objidlock */
- cfs_mutex_t fo_alloc_lock;
+ struct mutex fo_alloc_lock;
cfs_atomic_t fo_r_in_flight;
cfs_atomic_t fo_w_in_flight;
*/
struct cfs_hash *fo_iobuf_hash;
- cfs_list_t fo_llog_list;
- cfs_spinlock_t fo_llog_list_lock;
+ cfs_list_t fo_llog_list;
+ spinlock_t fo_llog_list_lock;
struct brw_stats fo_filter_stats;
/* sptlrpc stuff */
- cfs_rwlock_t fo_sptlrpc_lock;
+ rwlock_t fo_sptlrpc_lock;
struct sptlrpc_rule_set fo_sptlrpc_rset;
/* capability related */
struct mdc_rpc_lock;
struct obd_import;
struct client_obd {
- cfs_rw_semaphore_t cl_sem;
+ struct rw_semaphore cl_sem;
struct obd_uuid cl_target_uuid;
struct obd_import *cl_import; /* ptlrpc connection state */
int cl_conn_count;
struct mdc_rpc_lock *cl_close_lock;
/* mgc datastruct */
- cfs_semaphore_t cl_mgc_sem;
+ struct semaphore cl_mgc_sem;
struct vfsmount *cl_mgc_vfsmnt;
struct dentry *cl_mgc_configs_dir;
cfs_atomic_t cl_mgc_refcount;
/* */
struct echo_obd {
- struct obd_device_target eo_obt;
- struct obdo eo_oa;
- cfs_spinlock_t eo_lock;
- __u64 eo_lastino;
- struct lustre_handle eo_nl_lock;
- cfs_atomic_t eo_prep;
+ struct obd_device_target eo_obt;
+ struct obdo eo_oa;
+ spinlock_t eo_lock;
+ __u64 eo_lastino;
+ struct lustre_handle eo_nl_lock;
+ cfs_atomic_t eo_prep;
};
struct ost_obd {
- struct ptlrpc_service *ost_service;
- struct ptlrpc_service *ost_create_service;
- struct ptlrpc_service *ost_io_service;
- cfs_mutex_t ost_health_mutex;
+ struct ptlrpc_service *ost_service;
+ struct ptlrpc_service *ost_create_service;
+ struct ptlrpc_service *ost_io_service;
+ struct mutex ost_health_mutex;
};
struct echo_client_obd {
- struct obd_export *ec_exp; /* the local connection to osc/lov */
- cfs_spinlock_t ec_lock;
+ struct obd_export *ec_exp; /* the local connection to osc/lov */
+ spinlock_t ec_lock;
cfs_list_t ec_objects;
cfs_list_t ec_locks;
int ec_nstripes;
lov_obd->lov_tgts */
unsigned int op_count; /* number of OSTs in the array */
unsigned int op_size; /* allocated size of lp_array */
- cfs_rw_semaphore_t op_rw_sem; /* to protect ost_pool use */
+ struct rw_semaphore op_rw_sem; /* to protect ost_pool use */
};
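/*
 * Illustrative sketch, not part of the patch: the rw_semaphore calls that
 * cfs_down_read()/cfs_up_read() and cfs_down_write()/cfs_up_write() map to,
 * shown on ost_pool::op_rw_sem.  ost_pool_count() is hypothetical.
 */
#include <linux/rwsem.h>

static unsigned int ost_pool_count(struct ost_pool *op)
{
        unsigned int count;

        down_read(&op->op_rw_sem);      /* shared: concurrent readers allowed */
        count = op->op_count;
        up_read(&op->op_rw_sem);
        return count;
}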
/* Round-robin allocator data */
/* Stripe placement optimization */
struct lov_qos {
cfs_list_t lq_oss_list; /* list of OSSs that targets use */
- cfs_rw_semaphore_t lq_rw_sem;
+ struct rw_semaphore lq_rw_sem;
__u32 lq_active_oss_count;
unsigned int lq_prio_free; /* priority for free space */
unsigned int lq_threshold_rr;/* priority for rr */
struct lov_tgt_desc **lov_tgts; /* sparse array */
struct ost_pool lov_packed; /* all OSTs in a packed
array */
- cfs_mutex_t lov_lock;
+ struct mutex lov_lock;
struct obd_connect_data lov_ocd;
cfs_atomic_t lov_refcount;
__u32 lov_tgt_count; /* how many OBD's */
struct obd_export *ltd_exp;
int ltd_active; /* is this target up for requests */
int ltd_idx;
- cfs_mutex_t ltd_fid_mutex;
+ struct mutex ltd_fid_mutex;
};
enum placement_policy {
typedef enum placement_policy placement_policy_t;
struct lmv_obd {
- int refcount;
- struct lu_client_fld lmv_fld;
- cfs_spinlock_t lmv_lock;
+ int refcount;
+ struct lu_client_fld lmv_fld;
+ spinlock_t lmv_lock;
placement_policy_t lmv_placement;
struct lmv_desc desc;
struct obd_uuid cluuid;
int max_def_easize;
int max_cookiesize;
int server_timeout;
- cfs_mutex_t init_mutex;
+ struct mutex init_mutex;
struct lmv_tgt_desc *tgts;
int tgts_size;
};
struct target_recovery_data {
- svc_handler_t trd_recovery_handler;
- pid_t trd_processing_task;
- cfs_completion_t trd_starting;
- cfs_completion_t trd_finishing;
+ svc_handler_t trd_recovery_handler;
+ pid_t trd_processing_task;
+ struct completion trd_starting;
+ struct completion trd_finishing;
};
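/*
 * Illustrative sketch, not part of the patch: the completion API that
 * cfs_init_completion()/cfs_complete()/cfs_wait_for_completion() map to,
 * using the trd_starting/trd_finishing handshake of target_recovery_data.
 * trd_thread_demo() and trd_start_demo() are hypothetical.
 */
#include <linux/completion.h>

static int trd_thread_demo(void *arg)
{
        struct target_recovery_data *trd = arg;

        complete(&trd->trd_starting);   /* tell the starter we are running */
        /* ... recovery work ... */
        complete(&trd->trd_finishing);  /* tell the stopper we are done */
        return 0;
}

static void trd_start_demo(struct target_recovery_data *trd)
{
        init_completion(&trd->trd_starting);
        init_completion(&trd->trd_finishing);
        /* spawn trd_thread_demo() here, then wait until it has really started */
        wait_for_completion(&trd->trd_starting);
}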
/**
int olg_seq;
struct llog_ctxt *olg_ctxts[LLOG_MAX_CTXTS];
cfs_waitq_t olg_waitq;
- cfs_spinlock_t olg_lock;
- struct obd_export *olg_exp;
- int olg_initializing;
- cfs_mutex_t olg_cat_processing;
+ spinlock_t olg_lock;
+ struct obd_export *olg_exp;
+ int olg_initializing;
+ struct mutex olg_cat_processing;
};
/* corresponds to one of the obd's */
cfs_list_t obd_unlinked_exports;
cfs_list_t obd_delayed_exports;
int obd_num_exports;
- cfs_spinlock_t obd_nid_lock;
- struct ldlm_namespace *obd_namespace;
- struct ptlrpc_client obd_ldlm_client; /* XXX OST/MDS only */
- /* a spinlock is OK for what we do now, may need a semaphore later */
- cfs_spinlock_t obd_dev_lock; /* protects obd bitfield above */
- cfs_mutex_t obd_dev_mutex;
- __u64 obd_last_committed;
- struct fsfilt_operations *obd_fsops;
- cfs_spinlock_t obd_osfs_lock;
- struct obd_statfs obd_osfs; /* locked by obd_osfs_lock */
- __u64 obd_osfs_age;
- struct lvfs_run_ctxt obd_lvfs_ctxt;
- struct obd_llog_group obd_olg; /* default llog group */
- struct obd_device *obd_observer;
- cfs_rw_semaphore_t obd_observer_link_sem;
+ spinlock_t obd_nid_lock;
+ struct ldlm_namespace *obd_namespace;
+ struct ptlrpc_client obd_ldlm_client; /* XXX OST/MDS only */
+ /* a spinlock is OK for what we do now, may need a semaphore later */
+ spinlock_t obd_dev_lock; /* protect OBD bitfield above */
+ struct mutex obd_dev_mutex;
+ __u64 obd_last_committed;
+ struct fsfilt_operations *obd_fsops;
+ spinlock_t obd_osfs_lock;
+ struct obd_statfs obd_osfs; /* locked by obd_osfs_lock */
+ __u64 obd_osfs_age;
+ struct lvfs_run_ctxt obd_lvfs_ctxt;
+ struct obd_llog_group obd_olg; /* default llog group */
+ struct obd_device *obd_observer;
+ struct rw_semaphore obd_observer_link_sem;
struct obd_notify_upcall obd_upcall;
struct obd_export *obd_self_export;
/* list of exports in LRU order, for ping evictor, with obd_dev_lock */
int obd_delayed_clients;
/* this lock protects all recovery list_heads, timer and
* obd_next_recovery_transno value */
- cfs_spinlock_t obd_recovery_task_lock;
+ spinlock_t obd_recovery_task_lock;
__u64 obd_next_recovery_transno;
int obd_replayed_requests;
int obd_requests_queued_for_recovery;
/**
* Ldlm pool part. Save last calculated SLV and Limit.
*/
- cfs_rwlock_t obd_pool_lock;
+ rwlock_t obd_pool_lock;
int obd_pool_limit;
__u64 obd_pool_slv;
/* OBD Device Declarations */
extern struct obd_device *obd_devs[MAX_OBD_DEVICES];
-extern cfs_rwlock_t obd_dev_lock;
+extern rwlock_t obd_dev_lock;
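/*
 * Illustrative sketch, not part of the patch: the plain rwlock_t reader
 * path that cfs_rwlock_t users such as obd_dev_lock now use.
 * demo_count_devs() is hypothetical.
 */
#include <linux/spinlock.h>

static int demo_count_devs(void)
{
        int i, count = 0;

        read_lock(&obd_dev_lock);       /* shared: walk obd_devs[] */
        for (i = 0; i < MAX_OBD_DEVICES; i++)
                if (obd_devs[i] != NULL)
                        count++;
        read_unlock(&obd_dev_lock);
        return count;
}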
/* OBD Operations Declarations */
extern struct obd_device *class_conn2obd(struct lustre_handle *);
struct config_llog_data *cld_sptlrpc; /* dependent sptlrpc log */
struct config_llog_data *cld_recover; /* imperative recover log */
struct obd_export *cld_mgcexp;
- cfs_mutex_t cld_lock;
+ struct mutex cld_lock;
int cld_type;
unsigned int cld_stopping:1, /* we were told to stop
* watching */
/* If we set up but never connected, the
client import will not have been cleaned. */
- cfs_down_write(&obd->u.cli.cl_sem);
+ down_write(&obd->u.cli.cl_sem);
if (obd->u.cli.cl_import) {
struct obd_import *imp;
imp = obd->u.cli.cl_import;
client_destroy_import(imp);
obd->u.cli.cl_import = NULL;
}
- cfs_up_write(&obd->u.cli.cl_sem);
+ up_write(&obd->u.cli.cl_sem);
EXIT;
}
obd->obd_name, &obd->obd_osfs,
obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
- cfs_spin_lock(&obd->obd_osfs_lock);
- memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
- cfs_spin_unlock(&obd->obd_osfs_lock);
+ spin_lock(&obd->obd_osfs_lock);
+ memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
+ spin_unlock(&obd->obd_osfs_lock);
oinfo->oi_flags |= OBD_STATFS_FROM_CACHE;
if (oinfo->oi_cb_up)
oinfo->oi_cb_up(oinfo, 0);
if (cfs_time_before_64(obd->obd_osfs_age, max_age)) {
rc = OBP(obd, statfs)(env, exp, osfs, max_age, flags);
if (rc == 0) {
- cfs_spin_lock(&obd->obd_osfs_lock);
- memcpy(&obd->obd_osfs, osfs, sizeof(obd->obd_osfs));
- obd->obd_osfs_age = cfs_time_current_64();
- cfs_spin_unlock(&obd->obd_osfs_lock);
- }
- } else {
- CDEBUG(D_SUPER,"%s: use %p cache blocks "LPU64"/"LPU64
- " objects "LPU64"/"LPU64"\n",
- obd->obd_name, &obd->obd_osfs,
- obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
- obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
- cfs_spin_lock(&obd->obd_osfs_lock);
- memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
- cfs_spin_unlock(&obd->obd_osfs_lock);
- }
- RETURN(rc);
+ spin_lock(&obd->obd_osfs_lock);
+ memcpy(&obd->obd_osfs, osfs, sizeof(obd->obd_osfs));
+ obd->obd_osfs_age = cfs_time_current_64();
+ spin_unlock(&obd->obd_osfs_lock);
+ }
+ } else {
+ CDEBUG(D_SUPER, "%s: use %p cache blocks "LPU64"/"LPU64
+ " objects "LPU64"/"LPU64"\n",
+ obd->obd_name, &obd->obd_osfs,
+ obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
+ obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
+ spin_lock(&obd->obd_osfs_lock);
+ memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
+ spin_unlock(&obd->obd_osfs_lock);
+ }
+ RETURN(rc);
}
static inline int obd_sync_rqset(struct obd_export *exp, struct obd_info *oinfo,
{
ENTRY;
OBD_CHECK_DEV(obd);
- cfs_down_write(&obd->obd_observer_link_sem);
+ down_write(&obd->obd_observer_link_sem);
if (obd->obd_observer && observer) {
- cfs_up_write(&obd->obd_observer_link_sem);
+ up_write(&obd->obd_observer_link_sem);
RETURN(-EALREADY);
}
obd->obd_observer = observer;
- cfs_up_write(&obd->obd_observer_link_sem);
+ up_write(&obd->obd_observer_link_sem);
RETURN(0);
}
struct obd_device **observer)
{
ENTRY;
- cfs_down_read(&obd->obd_observer_link_sem);
+ down_read(&obd->obd_observer_link_sem);
if (!obd->obd_observer) {
*observer = NULL;
- cfs_up_read(&obd->obd_observer_link_sem);
+ up_read(&obd->obd_observer_link_sem);
RETURN(-ENOENT);
}
*observer = obd->obd_observer;
static inline int obd_unpin_observer(struct obd_device *obd)
{
ENTRY;
- cfs_up_read(&obd->obd_observer_link_sem);
+ up_read(&obd->obd_observer_link_sem);
RETURN(0);
}
* A mutex serializing calls to slp_inode_fini() under extreme memory
* pressure, when environments cannot be allocated.
*/
-static CFS_DEFINE_MUTEX(ccc_inode_fini_guard);
+static DEFINE_MUTEX(ccc_inode_fini_guard);
static int dummy_refcheck;
int ccc_global_init(struct lu_device_type *device_type)
env = cl_env_get(&refcheck);
emergency = IS_ERR(env);
if (emergency) {
- cfs_mutex_lock(&ccc_inode_fini_guard);
+ mutex_lock(&ccc_inode_fini_guard);
LASSERT(ccc_inode_fini_env != NULL);
cl_env_implant(ccc_inode_fini_env, &refcheck);
env = ccc_inode_fini_env;
lli->lli_clob = NULL;
if (emergency) {
cl_env_unplant(ccc_inode_fini_env, &refcheck);
- cfs_mutex_unlock(&ccc_inode_fini_guard);
+ mutex_unlock(&ccc_inode_fini_guard);
} else
cl_env_put(env, &refcheck);
cl_env_reexit(cookie);
flags = cli->cl_import->imp_connect_data.ocd_connect_flags;
CDEBUG(D_SUPER, "Changing connect_flags: "LPX64" -> "LPX64"\n",
lco->lco_flags, flags);
- cfs_mutex_lock(&lco->lco_lock);
+ mutex_lock(&lco->lco_lock);
lco->lco_flags &= flags;
/* for each osc event update ea size */
if (lco->lco_dt_exp)
cl_init_ea_size(lco->lco_md_exp, lco->lco_dt_exp);
- cfs_mutex_unlock(&lco->lco_lock);
+ mutex_unlock(&lco->lco_lock);
result = 0;
} else {
CERROR("unexpected notification from %s %s!\n",
{
/* on server-side resource of lock doesn't change */
if (!lock->l_ns_srv)
- cfs_spin_lock(&lock->l_lock);
+ spin_lock(&lock->l_lock);
lock_res(lock->l_resource);
unlock_res(lock->l_resource);
if (!lock->l_ns_srv)
- cfs_spin_unlock(&lock->l_lock);
+ spin_unlock(&lock->l_lock);
}
EXPORT_SYMBOL(unlock_res_and_lock);
imp = obd->u.cli.cl_import;
if (NULL != imp) {
- cfs_spin_lock(&imp->imp_lock);
- fwd.fwd_generation = imp->imp_generation;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ fwd.fwd_generation = imp->imp_generation;
+ spin_unlock(&imp->imp_lock);
}
lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);
extern cfs_atomic_t ldlm_srv_namespace_nr;
extern cfs_atomic_t ldlm_cli_namespace_nr;
-extern cfs_mutex_t ldlm_srv_namespace_lock;
+extern struct mutex ldlm_srv_namespace_lock;
extern cfs_list_t ldlm_srv_namespace_list;
-extern cfs_mutex_t ldlm_cli_namespace_lock;
+extern struct mutex ldlm_cli_namespace_lock;
extern cfs_list_t ldlm_cli_namespace_list;
static inline cfs_atomic_t *ldlm_namespace_nr(ldlm_side_t client)
&ldlm_srv_namespace_list : &ldlm_cli_namespace_list;
}
-static inline cfs_mutex_t *ldlm_namespace_lock(ldlm_side_t client)
+static inline struct mutex *ldlm_namespace_lock(ldlm_side_t client)
{
return client == LDLM_NAMESPACE_SERVER ?
&ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
struct ldlm_pool *pl = data; \
type tmp; \
\
- cfs_spin_lock(&pl->pl_lock); \
- tmp = pl->pl_##var; \
- cfs_spin_unlock(&pl->pl_lock); \
+ spin_lock(&pl->pl_lock); \
+ tmp = pl->pl_##var; \
+ spin_unlock(&pl->pl_lock); \
\
return lprocfs_rd_uint(page, start, off, count, eof, &tmp); \
} \
return rc; \
} \
\
- cfs_spin_lock(&pl->pl_lock); \
- pl->pl_##var = tmp; \
- cfs_spin_unlock(&pl->pl_lock); \
+ spin_lock(&pl->pl_lock); \
+ pl->pl_##var = tmp; \
+ spin_unlock(&pl->pl_lock); \
\
return rc; \
} \
}
}
- cfs_spin_lock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
cfs_list_for_each_entry(item, &imp->imp_conn_list, oic_item) {
if (obd_uuid_equals(uuid, &item->oic_uuid)) {
if (priority) {
CDEBUG(D_HA, "imp %p@%s: found existing conn %s%s\n",
imp, imp->imp_obd->obd_name, uuid->uuid,
(priority ? ", moved to head" : ""));
- cfs_spin_unlock(&imp->imp_lock);
+ spin_unlock(&imp->imp_lock);
GOTO(out_free, rc = 0);
}
}
imp, imp->imp_obd->obd_name, uuid->uuid,
(priority ? "head" : "tail"));
} else {
- cfs_spin_unlock(&imp->imp_lock);
- GOTO(out_free, rc = -ENOENT);
- }
+ spin_unlock(&imp->imp_lock);
+ GOTO(out_free, rc = -ENOENT);
+ }
- cfs_spin_unlock(&imp->imp_lock);
+ spin_unlock(&imp->imp_lock);
RETURN(0);
out_free:
if (imp_conn)
int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
{
- struct obd_import_conn *imp_conn;
- struct obd_export *dlmexp;
- int rc = -ENOENT;
- ENTRY;
+ struct obd_import_conn *imp_conn;
+ struct obd_export *dlmexp;
+ int rc = -ENOENT;
+ ENTRY;
- cfs_spin_lock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
if (cfs_list_empty(&imp->imp_conn_list)) {
LASSERT(!imp->imp_connection);
GOTO(out, rc);
break;
}
out:
- cfs_spin_unlock(&imp->imp_lock);
- if (rc == -ENOENT)
- CERROR("connection %s not found\n", uuid->uuid);
- RETURN(rc);
+ spin_unlock(&imp->imp_lock);
+ if (rc == -ENOENT)
+ CERROR("connection %s not found\n", uuid->uuid);
+ RETURN(rc);
}
EXPORT_SYMBOL(client_import_del_conn);
* to find a conn uuid of @imp which can reach @peer.
*/
int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
- struct obd_uuid *uuid)
+ struct obd_uuid *uuid)
{
- struct obd_import_conn *conn;
- int rc = -ENOENT;
- ENTRY;
+ struct obd_import_conn *conn;
+ int rc = -ENOENT;
+ ENTRY;
- cfs_spin_lock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
cfs_list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
/* check if conn uuid does have this peer nid */
if (class_check_uuid(&conn->oic_uuid, peer)) {
break;
}
}
- cfs_spin_unlock(&imp->imp_lock);
- RETURN(rc);
+ spin_unlock(&imp->imp_lock);
+ RETURN(rc);
}
EXPORT_SYMBOL(client_import_find_conn);
RETURN(-EINVAL);
}
- cfs_init_rwsem(&cli->cl_sem);
- cfs_sema_init(&cli->cl_mgc_sem, 1);
+ init_rwsem(&cli->cl_sem);
+ sema_init(&cli->cl_mgc_sem, 1);
cli->cl_conn_count = 0;
memcpy(server_uuid.uuid, lustre_cfg_buf(lcfg, 2),
min_t(unsigned int, LUSTRE_CFG_BUFLEN(lcfg, 2),
client_obd_list_lock_init(&cli->cl_loi_list_lock);
cfs_atomic_set(&cli->cl_pending_w_pages, 0);
cfs_atomic_set(&cli->cl_pending_r_pages, 0);
- cli->cl_r_in_flight = 0;
- cli->cl_w_in_flight = 0;
+ cli->cl_r_in_flight = 0;
+ cli->cl_w_in_flight = 0;
- cfs_spin_lock_init(&cli->cl_read_rpc_hist.oh_lock);
- cfs_spin_lock_init(&cli->cl_write_rpc_hist.oh_lock);
- cfs_spin_lock_init(&cli->cl_read_page_hist.oh_lock);
- cfs_spin_lock_init(&cli->cl_write_page_hist.oh_lock);
- cfs_spin_lock_init(&cli->cl_read_offset_hist.oh_lock);
- cfs_spin_lock_init(&cli->cl_write_offset_hist.oh_lock);
+ spin_lock_init(&cli->cl_read_rpc_hist.oh_lock);
+ spin_lock_init(&cli->cl_write_rpc_hist.oh_lock);
+ spin_lock_init(&cli->cl_read_page_hist.oh_lock);
+ spin_lock_init(&cli->cl_write_page_hist.oh_lock);
+ spin_lock_init(&cli->cl_read_offset_hist.oh_lock);
+ spin_lock_init(&cli->cl_write_offset_hist.oh_lock);
/* lru for osc. */
CFS_INIT_LIST_HEAD(&cli->cl_lru_osc);
CDEBUG(D_HA, "marking %s %s->%s as inactive\n",
name, obddev->obd_name,
cli->cl_target_uuid.uuid);
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_deactive = 1;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ imp->imp_deactive = 1;
+ spin_unlock(&imp->imp_lock);
}
}
ENTRY;
*exp = NULL;
- cfs_down_write(&cli->cl_sem);
+ down_write(&cli->cl_sem);
if (cli->cl_conn_count > 0 )
GOTO(out_sem, rc = -EALREADY);
*exp = NULL;
}
out_sem:
- cfs_up_write(&cli->cl_sem);
+ up_write(&cli->cl_sem);
return rc;
}
cli = &obd->u.cli;
imp = cli->cl_import;
- cfs_down_write(&cli->cl_sem);
+ down_write(&cli->cl_sem);
CDEBUG(D_INFO, "disconnect %s - %d\n", obd->obd_name,
cli->cl_conn_count);
/* Mark import deactivated now, so we don't try to reconnect if any
* of the cleanup RPCs fails (e.g. ldlm cancel, etc). We don't
* fully deactivate the import, or that would drop all requests. */
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_deactive = 1;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ imp->imp_deactive = 1;
+ spin_unlock(&imp->imp_lock);
/* Some non-replayable imports (MDS's OSCs) are pinged, so just
* delete it regardless. (It's safe to delete an import that was
* there's no need to hold sem during disconnecting an import,
* and actually it may cause deadlock in gss.
*/
- cfs_up_write(&cli->cl_sem);
- rc = ptlrpc_disconnect_import(imp, 0);
- cfs_down_write(&cli->cl_sem);
+ up_write(&cli->cl_sem);
+ rc = ptlrpc_disconnect_import(imp, 0);
+ down_write(&cli->cl_sem);
ptlrpc_invalidate_import(imp);
if (!rc && err)
rc = err;
- cfs_up_write(&cli->cl_sem);
+ up_write(&cli->cl_sem);
RETURN(rc);
}
ldlm_cancel_locks_for_export(exp);
/* complete all outstanding replies */
- cfs_spin_lock(&exp->exp_lock);
- while (!cfs_list_empty(&exp->exp_outstanding_replies)) {
- struct ptlrpc_reply_state *rs =
- cfs_list_entry(exp->exp_outstanding_replies.next,
- struct ptlrpc_reply_state, rs_exp_list);
+ spin_lock(&exp->exp_lock);
+ while (!cfs_list_empty(&exp->exp_outstanding_replies)) {
+ struct ptlrpc_reply_state *rs =
+ cfs_list_entry(exp->exp_outstanding_replies.next,
+ struct ptlrpc_reply_state, rs_exp_list);
struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
- cfs_spin_lock(&svcpt->scp_rep_lock);
+ spin_lock(&svcpt->scp_rep_lock);
cfs_list_del_init(&rs->rs_exp_list);
- cfs_spin_lock(&rs->rs_lock);
+ spin_lock(&rs->rs_lock);
ptlrpc_schedule_difficult_reply(rs);
- cfs_spin_unlock(&rs->rs_lock);
+ spin_unlock(&rs->rs_lock);
- cfs_spin_unlock(&svcpt->scp_rep_lock);
+ spin_unlock(&svcpt->scp_rep_lock);
}
- cfs_spin_unlock(&exp->exp_lock);
+ spin_unlock(&exp->exp_lock);
RETURN(rc);
}
CDEBUG(D_RPCTRACE, "%s: committing for initial connect of %s\n",
obd->obd_name, exp->exp_client_uuid.uuid);
- cfs_spin_lock(&exp->exp_lock);
- exp->exp_need_sync = 0;
- cfs_spin_unlock(&exp->exp_lock);
- class_export_cb_put(exp);
+ spin_lock(&exp->exp_lock);
+ exp->exp_need_sync = 0;
+ spin_unlock(&exp->exp_lock);
+ class_export_cb_put(exp);
}
EXPORT_SYMBOL(target_client_add_cb);
GOTO(out, rc = -ENODEV);
}
- cfs_spin_lock(&target->obd_dev_lock);
+ spin_lock(&target->obd_dev_lock);
if (target->obd_stopping || !target->obd_set_up) {
- cfs_spin_unlock(&target->obd_dev_lock);
+ spin_unlock(&target->obd_dev_lock);
deuuidify(str, NULL, &target_start, &target_len);
LCONSOLE_ERROR_MSG(0x137, "%.*s: Not available for connect "
}
if (target->obd_no_conn) {
- cfs_spin_unlock(&target->obd_dev_lock);
+ spin_unlock(&target->obd_dev_lock);
LCONSOLE_WARN("%s: Temporarily refusing client connection "
"from %s\n", target->obd_name,
targref = class_incref(target, __FUNCTION__, cfs_current());
target->obd_conn_inprogress++;
- cfs_spin_unlock(&target->obd_dev_lock);
+ spin_unlock(&target->obd_dev_lock);
str = req_capsule_client_get(&req->rq_pill, &RMF_CLUUID);
if (str == NULL) {
/* we've found an export in the hash */
- cfs_spin_lock(&export->exp_lock);
-
- if (export->exp_connecting) { /* bug 9635, et. al. */
- cfs_spin_unlock(&export->exp_lock);
- LCONSOLE_WARN("%s: Export %p already connecting from %s\n",
- export->exp_obd->obd_name, export,
- libcfs_nid2str(req->rq_peer.nid));
- class_export_put(export);
- export = NULL;
- rc = -EALREADY;
- } else if (mds_conn && export->exp_connection) {
- cfs_spin_unlock(&export->exp_lock);
+ spin_lock(&export->exp_lock);
+
+ if (export->exp_connecting) { /* bug 9635, et al. */
+ spin_unlock(&export->exp_lock);
+ LCONSOLE_WARN("%s: Export %p already connecting from %s\n",
+ export->exp_obd->obd_name, export,
+ libcfs_nid2str(req->rq_peer.nid));
+ class_export_put(export);
+ export = NULL;
+ rc = -EALREADY;
+ } else if (mds_conn && export->exp_connection) {
+ spin_unlock(&export->exp_lock);
if (req->rq_peer.nid != export->exp_connection->c_peer.nid)
/* mds reconnected after failover */
LCONSOLE_WARN("%s: Received MDS connection from "
req->rq_peer.nid != export->exp_connection->c_peer.nid &&
(lustre_msg_get_op_flags(req->rq_reqmsg) &
MSG_CONNECT_INITIAL)) {
- cfs_spin_unlock(&export->exp_lock);
+ spin_unlock(&export->exp_lock);
/* in mds failover we have static uuid but nid can be
* changed*/
LCONSOLE_WARN("%s: Client %s seen on new nid %s when "
export = NULL;
} else {
export->exp_connecting = 1;
- cfs_spin_unlock(&export->exp_lock);
+ spin_unlock(&export->exp_lock);
LASSERT(export->exp_obd == target);
rc = target_handle_reconnect(&conn, export, &cluuid);
target->obd_name, cluuid.uuid,
libcfs_nid2str(req->rq_peer.nid),
cfs_atomic_read(&export->exp_rpc_count) - 1);
- cfs_spin_lock(&export->exp_lock);
- if (req->rq_export->exp_conn_cnt <
- lustre_msg_get_conn_cnt(req->rq_reqmsg))
- /* try to abort active requests */
- req->rq_export->exp_abort_active_req = 1;
- cfs_spin_unlock(&export->exp_lock);
- GOTO(out, rc = -EBUSY);
+ spin_lock(&export->exp_lock);
+ if (req->rq_export->exp_conn_cnt <
+ lustre_msg_get_conn_cnt(req->rq_reqmsg))
+ /* try to abort active requests */
+ req->rq_export->exp_abort_active_req = 1;
+ spin_unlock(&export->exp_lock);
+ GOTO(out, rc = -EBUSY);
} else if (lustre_msg_get_conn_cnt(req->rq_reqmsg) == 1) {
if (!strstr(cluuid.uuid, "mdt"))
LCONSOLE_WARN("%s: Rejecting reconnect from the "
/* request takes one export refcount */
req->rq_export = class_export_get(export);
- cfs_spin_lock(&export->exp_lock);
- if (export->exp_conn_cnt >= lustre_msg_get_conn_cnt(req->rq_reqmsg)) {
- cfs_spin_unlock(&export->exp_lock);
+ spin_lock(&export->exp_lock);
+ if (export->exp_conn_cnt >= lustre_msg_get_conn_cnt(req->rq_reqmsg)) {
+ spin_unlock(&export->exp_lock);
CDEBUG(D_RPCTRACE, "%s: %s already connected at greater "
"or equal conn_cnt: %d >= %d\n",
cluuid.uuid, libcfs_nid2str(req->rq_peer.nid),
/* request from liblustre? Don't evict it for not pinging. */
if (lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_LIBCLIENT) {
export->exp_libclient = 1;
- cfs_spin_unlock(&export->exp_lock);
+ spin_unlock(&export->exp_lock);
- cfs_spin_lock(&target->obd_dev_lock);
- cfs_list_del_init(&export->exp_obd_chain_timed);
- cfs_spin_unlock(&target->obd_dev_lock);
- } else {
- cfs_spin_unlock(&export->exp_lock);
- }
+ spin_lock(&target->obd_dev_lock);
+ cfs_list_del_init(&export->exp_obd_chain_timed);
+ spin_unlock(&target->obd_dev_lock);
+ } else {
+ spin_unlock(&export->exp_lock);
+ }
if (export->exp_connection != NULL) {
/* Check to see if connection came from another NID */
int has_transno;
__u64 transno = data->ocd_transno;
- cfs_spin_lock(&export->exp_lock);
+ spin_lock(&export->exp_lock);
/* possible race with class_disconnect_stale_exports,
* export may be already in the eviction process */
if (export->exp_failed) {
- cfs_spin_unlock(&export->exp_lock);
+ spin_unlock(&export->exp_lock);
GOTO(out, rc = -ENODEV);
}
- export->exp_in_recovery = 1;
- export->exp_req_replay_needed = 1;
- export->exp_lock_replay_needed = 1;
- cfs_spin_unlock(&export->exp_lock);
+ export->exp_in_recovery = 1;
+ export->exp_req_replay_needed = 1;
+ export->exp_lock_replay_needed = 1;
+ spin_unlock(&export->exp_lock);
has_transno = !!(lustre_msg_get_op_flags(req->rq_reqmsg) &
MSG_CONNECT_TRANSNO);
transno > target->obd_last_committed) {
/* another way is to use cmpxchg() so it will be
* lock free */
- cfs_spin_lock(&target->obd_recovery_task_lock);
- if (transno < target->obd_next_recovery_transno)
- target->obd_next_recovery_transno = transno;
- cfs_spin_unlock(&target->obd_recovery_task_lock);
+ spin_lock(&target->obd_recovery_task_lock);
+ if (transno < target->obd_next_recovery_transno)
+ target->obd_next_recovery_transno = transno;
+ spin_unlock(&target->obd_recovery_task_lock);
}
cfs_atomic_inc(&target->obd_req_replay_clients);
GOTO(out, rc = -ENOTCONN);
}
- cfs_spin_lock(&export->exp_lock);
+ spin_lock(&export->exp_lock);
if (export->exp_imp_reverse != NULL)
/* destroyed import can be still referenced in ctxt */
tmp_imp = export->exp_imp_reverse;
export->exp_imp_reverse = revimp;
- cfs_spin_unlock(&export->exp_lock);
+ spin_unlock(&export->exp_lock);
revimp->imp_connection = ptlrpc_connection_addref(export->exp_connection);
revimp->imp_client = &export->exp_obd->obd_ldlm_client;
rc = sptlrpc_import_sec_adapt(revimp, req->rq_svc_ctx, &req->rq_flvr);
if (rc) {
CERROR("Failed to get sec for reverse import: %d\n", rc);
- cfs_spin_lock(&export->exp_lock);
+ spin_lock(&export->exp_lock);
export->exp_imp_reverse = NULL;
- cfs_spin_unlock(&export->exp_lock);
+ spin_unlock(&export->exp_lock);
class_destroy_import(revimp);
}
out:
if (tmp_imp != NULL)
client_destroy_import(tmp_imp);
- if (export) {
- cfs_spin_lock(&export->exp_lock);
- export->exp_connecting = 0;
- cfs_spin_unlock(&export->exp_lock);
+ if (export) {
+ spin_lock(&export->exp_lock);
+ export->exp_connecting = 0;
+ spin_unlock(&export->exp_lock);
- class_export_put(export);
- }
- if (targref) {
- cfs_spin_lock(&target->obd_dev_lock);
+ class_export_put(export);
+ }
+ if (targref) {
+ spin_lock(&target->obd_dev_lock);
target->obd_conn_inprogress--;
- cfs_spin_unlock(&target->obd_dev_lock);
+ spin_unlock(&target->obd_dev_lock);
- class_decref(targref, __FUNCTION__, cfs_current());
+ class_decref(targref, __func__, cfs_current());
}
- if (rc)
- req->rq_status = rc;
- RETURN(rc);
+ if (rc)
+ req->rq_status = rc;
+ RETURN(rc);
}
EXPORT_SYMBOL(target_handle_connect);
struct obd_import *imp = NULL;
/* exports created from last_rcvd data, and "fake"
exports created by lctl don't have an import */
- cfs_spin_lock(&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
if (exp->exp_imp_reverse != NULL) {
imp = exp->exp_imp_reverse;
exp->exp_imp_reverse = NULL;
}
- cfs_spin_unlock(&exp->exp_lock);
+ spin_unlock(&exp->exp_lock);
if (imp != NULL)
client_destroy_import(imp);
LASSERT(exp);
- cfs_spin_lock(&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
cfs_list_for_each_entry(reqiter, &exp->exp_req_replay_queue,
rq_replay_list) {
if (lustre_msg_get_transno(reqiter->rq_reqmsg) == transno) {
&exp->exp_req_replay_queue);
}
- cfs_spin_unlock(&exp->exp_lock);
+ spin_unlock(&exp->exp_lock);
return dup;
}
static void target_exp_dequeue_req_replay(struct ptlrpc_request *req)
{
- LASSERT(!cfs_list_empty(&req->rq_replay_list));
- LASSERT(req->rq_export);
+ LASSERT(!cfs_list_empty(&req->rq_replay_list));
+ LASSERT(req->rq_export);
- cfs_spin_lock(&req->rq_export->exp_lock);
- cfs_list_del_init(&req->rq_replay_list);
- cfs_spin_unlock(&req->rq_export->exp_lock);
+ spin_lock(&req->rq_export->exp_lock);
+ cfs_list_del_init(&req->rq_replay_list);
+ spin_unlock(&req->rq_export->exp_lock);
}
#ifdef __KERNEL__
}
ldlm_reprocess_all_ns(obd->obd_namespace);
- cfs_spin_lock(&obd->obd_recovery_task_lock);
+ spin_lock(&obd->obd_recovery_task_lock);
if (!cfs_list_empty(&obd->obd_req_replay_queue) ||
!cfs_list_empty(&obd->obd_lock_replay_queue) ||
!cfs_list_empty(&obd->obd_final_req_queue)) {
"" : "lock ",
cfs_list_empty(&obd->obd_final_req_queue) ? \
"" : "final ");
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- LBUG();
- }
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ spin_unlock(&obd->obd_recovery_task_lock);
+ LBUG();
+ }
+ spin_unlock(&obd->obd_recovery_task_lock);
obd->obd_recovery_end = cfs_time_current_sec();
static void abort_req_replay_queue(struct obd_device *obd)
{
- struct ptlrpc_request *req, *n;
- cfs_list_t abort_list;
+ struct ptlrpc_request *req, *n;
+ cfs_list_t abort_list;
- CFS_INIT_LIST_HEAD(&abort_list);
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- cfs_list_splice_init(&obd->obd_req_replay_queue, &abort_list);
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ CFS_INIT_LIST_HEAD(&abort_list);
+ spin_lock(&obd->obd_recovery_task_lock);
+ cfs_list_splice_init(&obd->obd_req_replay_queue, &abort_list);
+ spin_unlock(&obd->obd_recovery_task_lock);
cfs_list_for_each_entry_safe(req, n, &abort_list, rq_list) {
DEBUG_REQ(D_WARNING, req, "aborted:");
req->rq_status = -ENOTCONN;
static void abort_lock_replay_queue(struct obd_device *obd)
{
- struct ptlrpc_request *req, *n;
- cfs_list_t abort_list;
+ struct ptlrpc_request *req, *n;
+ cfs_list_t abort_list;
- CFS_INIT_LIST_HEAD(&abort_list);
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- cfs_list_splice_init(&obd->obd_lock_replay_queue, &abort_list);
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ CFS_INIT_LIST_HEAD(&abort_list);
+ spin_lock(&obd->obd_recovery_task_lock);
+ cfs_list_splice_init(&obd->obd_lock_replay_queue, &abort_list);
+ spin_unlock(&obd->obd_recovery_task_lock);
cfs_list_for_each_entry_safe(req, n, &abort_list, rq_list){
DEBUG_REQ(D_ERROR, req, "aborted:");
req->rq_status = -ENOTCONN;
ENTRY;
CFS_INIT_LIST_HEAD(&clean_list);
- cfs_spin_lock(&obd->obd_dev_lock);
- if (!obd->obd_recovering) {
- cfs_spin_unlock(&obd->obd_dev_lock);
- EXIT;
- return;
- }
- obd->obd_recovering = obd->obd_abort_recovery = 0;
- cfs_spin_unlock(&obd->obd_dev_lock);
-
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- target_cancel_recovery_timer(obd);
- cfs_list_splice_init(&obd->obd_req_replay_queue, &clean_list);
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
-
- cfs_list_for_each_entry_safe(req, n, &clean_list, rq_list) {
- LASSERT(req->rq_reply_state == 0);
- target_exp_dequeue_req_replay(req);
- target_request_copy_put(req);
- }
+ spin_lock(&obd->obd_dev_lock);
+ if (!obd->obd_recovering) {
+ spin_unlock(&obd->obd_dev_lock);
+ EXIT;
+ return;
+ }
+ obd->obd_recovering = obd->obd_abort_recovery = 0;
+ spin_unlock(&obd->obd_dev_lock);
+
+ spin_lock(&obd->obd_recovery_task_lock);
+ target_cancel_recovery_timer(obd);
+ cfs_list_splice_init(&obd->obd_req_replay_queue, &clean_list);
+ spin_unlock(&obd->obd_recovery_task_lock);
+
+ cfs_list_for_each_entry_safe(req, n, &clean_list, rq_list) {
+ LASSERT(req->rq_reply_state == 0);
+ target_exp_dequeue_req_replay(req);
+ target_request_copy_put(req);
+ }
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- cfs_list_splice_init(&obd->obd_lock_replay_queue, &clean_list);
- cfs_list_splice_init(&obd->obd_final_req_queue, &clean_list);
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ spin_lock(&obd->obd_recovery_task_lock);
+ cfs_list_splice_init(&obd->obd_lock_replay_queue, &clean_list);
+ cfs_list_splice_init(&obd->obd_final_req_queue, &clean_list);
+ spin_unlock(&obd->obd_recovery_task_lock);
cfs_list_for_each_entry_safe(req, n, &clean_list, rq_list){
LASSERT(req->rq_reply_state == 0);
static void target_start_recovery_timer(struct obd_device *obd)
{
- if (obd->obd_recovery_start != 0)
- return;
+ if (obd->obd_recovery_start != 0)
+ return;
- cfs_spin_lock(&obd->obd_dev_lock);
- if (!obd->obd_recovering || obd->obd_abort_recovery) {
- cfs_spin_unlock(&obd->obd_dev_lock);
- return;
- }
+ spin_lock(&obd->obd_dev_lock);
+ if (!obd->obd_recovering || obd->obd_abort_recovery) {
+ spin_unlock(&obd->obd_dev_lock);
+ return;
+ }
- LASSERT(obd->obd_recovery_timeout != 0);
+ LASSERT(obd->obd_recovery_timeout != 0);
- if (obd->obd_recovery_start != 0) {
- cfs_spin_unlock(&obd->obd_dev_lock);
- return;
- }
+ if (obd->obd_recovery_start != 0) {
+ spin_unlock(&obd->obd_dev_lock);
+ return;
+ }
- cfs_timer_arm(&obd->obd_recovery_timer,
- cfs_time_shift(obd->obd_recovery_timeout));
- obd->obd_recovery_start = cfs_time_current_sec();
- cfs_spin_unlock(&obd->obd_dev_lock);
+ cfs_timer_arm(&obd->obd_recovery_timer,
+ cfs_time_shift(obd->obd_recovery_timeout));
+ obd->obd_recovery_start = cfs_time_current_sec();
+ spin_unlock(&obd->obd_dev_lock);
LCONSOLE_WARN("%s: Will be in recovery for at least %d:%.02d, "
"or until %d client%s reconnect%s\n",
*/
static void extend_recovery_timer(struct obd_device *obd, int drt, bool extend)
{
- cfs_time_t now;
- cfs_time_t end;
- cfs_duration_t left;
- int to;
-
- cfs_spin_lock(&obd->obd_dev_lock);
- if (!obd->obd_recovering || obd->obd_abort_recovery) {
- cfs_spin_unlock(&obd->obd_dev_lock);
+ cfs_time_t now;
+ cfs_time_t end;
+ cfs_duration_t left;
+ int to;
+
+ spin_lock(&obd->obd_dev_lock);
+ if (!obd->obd_recovering || obd->obd_abort_recovery) {
+ spin_unlock(&obd->obd_dev_lock);
return;
}
LASSERT(obd->obd_recovery_start != 0);
cfs_timer_arm(&obd->obd_recovery_timer,
cfs_time_shift(drt));
}
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_unlock(&obd->obd_dev_lock);
- CDEBUG(D_HA, "%s: recovery timer will expire in %u seconds\n",
- obd->obd_name, (unsigned)drt);
+ CDEBUG(D_HA, "%s: recovery timer will expire in %u seconds\n",
+ obd->obd_name, (unsigned)drt);
}
/* Reset the timer with each new client connection */
static int check_for_next_transno(struct obd_device *obd)
{
- struct ptlrpc_request *req = NULL;
- int wake_up = 0, connected, completed, queue_len;
- __u64 next_transno, req_transno;
- ENTRY;
+ struct ptlrpc_request *req = NULL;
+ int wake_up = 0, connected, completed, queue_len;
+ __u64 next_transno, req_transno;
+ ENTRY;
- cfs_spin_lock(&obd->obd_recovery_task_lock);
+ spin_lock(&obd->obd_recovery_task_lock);
if (!cfs_list_empty(&obd->obd_req_replay_queue)) {
req = cfs_list_entry(obd->obd_req_replay_queue.next,
struct ptlrpc_request, rq_list);
obd->obd_next_recovery_transno = req_transno;
wake_up = 1;
}
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- return wake_up;
+ spin_unlock(&obd->obd_recovery_task_lock);
+ return wake_up;
}
static int check_for_next_lock(struct obd_device *obd)
{
- int wake_up = 0;
+ int wake_up = 0;
- cfs_spin_lock(&obd->obd_recovery_task_lock);
+ spin_lock(&obd->obd_recovery_task_lock);
if (!cfs_list_empty(&obd->obd_lock_replay_queue)) {
CDEBUG(D_HA, "waking for next lock\n");
wake_up = 1;
CDEBUG(D_HA, "waking for expired recovery\n");
wake_up = 1;
}
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ spin_unlock(&obd->obd_recovery_task_lock);
- return wake_up;
+ return wake_up;
}
/**
/** evict client exports with no replay in queue, they are stalled */
class_disconnect_stale_exports(obd, health_check);
/** continue with VBR */
- cfs_spin_lock(&obd->obd_dev_lock);
- obd->obd_version_recov = 1;
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_dev_lock);
+ obd->obd_version_recov = 1;
+ spin_unlock(&obd->obd_dev_lock);
/**
* reset timer, recovery will proceed with versions now,
* timeout is set just to handle reconnection delays
abort_lock_replay_queue(obd);
}
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- if (!cfs_list_empty(&obd->obd_req_replay_queue)) {
- req = cfs_list_entry(obd->obd_req_replay_queue.next,
- struct ptlrpc_request, rq_list);
- cfs_list_del_init(&req->rq_list);
- obd->obd_requests_queued_for_recovery--;
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- } else {
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- LASSERT(cfs_list_empty(&obd->obd_req_replay_queue));
- LASSERT(cfs_atomic_read(&obd->obd_req_replay_clients) == 0);
- /** evict exports failed VBR */
- class_disconnect_stale_exports(obd, exp_vbr_healthy);
- }
- RETURN(req);
+ spin_lock(&obd->obd_recovery_task_lock);
+ if (!cfs_list_empty(&obd->obd_req_replay_queue)) {
+ req = cfs_list_entry(obd->obd_req_replay_queue.next,
+ struct ptlrpc_request, rq_list);
+ cfs_list_del_init(&req->rq_list);
+ obd->obd_requests_queued_for_recovery--;
+ spin_unlock(&obd->obd_recovery_task_lock);
+ } else {
+ spin_unlock(&obd->obd_recovery_task_lock);
+ LASSERT(cfs_list_empty(&obd->obd_req_replay_queue));
+ LASSERT(cfs_atomic_read(&obd->obd_req_replay_clients) == 0);
+ /** evict exports failed VBR */
+ class_disconnect_stale_exports(obd, exp_vbr_healthy);
+ }
+ RETURN(req);
}
static struct ptlrpc_request *target_next_replay_lock(struct obd_device *obd)
exp_lock_replay_healthy))
abort_lock_replay_queue(obd);
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- if (!cfs_list_empty(&obd->obd_lock_replay_queue)) {
- req = cfs_list_entry(obd->obd_lock_replay_queue.next,
- struct ptlrpc_request, rq_list);
- cfs_list_del_init(&req->rq_list);
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- } else {
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ spin_lock(&obd->obd_recovery_task_lock);
+ if (!cfs_list_empty(&obd->obd_lock_replay_queue)) {
+ req = cfs_list_entry(obd->obd_lock_replay_queue.next,
+ struct ptlrpc_request, rq_list);
+ cfs_list_del_init(&req->rq_list);
+ spin_unlock(&obd->obd_recovery_task_lock);
+ } else {
+ spin_unlock(&obd->obd_recovery_task_lock);
LASSERT(cfs_list_empty(&obd->obd_lock_replay_queue));
LASSERT(cfs_atomic_read(&obd->obd_lock_replay_clients) == 0);
/** evict exports failed VBR */
static struct ptlrpc_request *target_next_final_ping(struct obd_device *obd)
{
- struct ptlrpc_request *req = NULL;
-
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- if (!cfs_list_empty(&obd->obd_final_req_queue)) {
- req = cfs_list_entry(obd->obd_final_req_queue.next,
- struct ptlrpc_request, rq_list);
- cfs_list_del_init(&req->rq_list);
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- if (req->rq_export->exp_in_recovery) {
- cfs_spin_lock(&req->rq_export->exp_lock);
- req->rq_export->exp_in_recovery = 0;
- cfs_spin_unlock(&req->rq_export->exp_lock);
- }
- } else {
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- }
- return req;
+ struct ptlrpc_request *req = NULL;
+
+ spin_lock(&obd->obd_recovery_task_lock);
+ if (!cfs_list_empty(&obd->obd_final_req_queue)) {
+ req = cfs_list_entry(obd->obd_final_req_queue.next,
+ struct ptlrpc_request, rq_list);
+ cfs_list_del_init(&req->rq_list);
+ spin_unlock(&obd->obd_recovery_task_lock);
+ if (req->rq_export->exp_in_recovery) {
+ spin_lock(&req->rq_export->exp_lock);
+ req->rq_export->exp_in_recovery = 0;
+ spin_unlock(&req->rq_export->exp_lock);
+ }
+ } else {
+ spin_unlock(&obd->obd_recovery_task_lock);
+ }
+ return req;
}
static int handle_recovery_req(struct ptlrpc_thread *thread,
cfs_curproc_pid());
trd->trd_processing_task = cfs_curproc_pid();
- cfs_spin_lock(&obd->obd_dev_lock);
- obd->obd_recovering = 1;
- cfs_spin_unlock(&obd->obd_dev_lock);
- cfs_complete(&trd->trd_starting);
+ spin_lock(&obd->obd_dev_lock);
+ obd->obd_recovering = 1;
+ spin_unlock(&obd->obd_dev_lock);
+ complete(&trd->trd_starting);
/* first of all, we have to know the first transno to replay */
if (target_recovery_overseer(obd, check_for_clients,
* bz18031: increase next_recovery_transno before
* target_request_copy_put() will drop exp_rpc reference
*/
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- obd->obd_next_recovery_transno++;
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ spin_lock(&obd->obd_recovery_task_lock);
+ obd->obd_next_recovery_transno++;
+ spin_unlock(&obd->obd_recovery_task_lock);
target_exp_dequeue_req_replay(req);
target_request_copy_put(req);
obd->obd_replayed_requests++;
tgt_boot_epoch_update(lut);
/* We drop the recovering flag to forward all new requests
* to regular mds_handle() from now on */
- cfs_spin_lock(&obd->obd_dev_lock);
- obd->obd_recovering = obd->obd_abort_recovery = 0;
- cfs_spin_unlock(&obd->obd_dev_lock);
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- target_cancel_recovery_timer(obd);
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ spin_lock(&obd->obd_dev_lock);
+ obd->obd_recovering = obd->obd_abort_recovery = 0;
+ spin_unlock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_recovery_task_lock);
+ target_cancel_recovery_timer(obd);
+ spin_unlock(&obd->obd_recovery_task_lock);
while ((req = target_next_final_ping(obd))) {
LASSERT(trd->trd_processing_task == cfs_curproc_pid());
DEBUG_REQ(D_HA, req, "processing final ping from %s: ",
lu_context_fini(&env->le_ctx);
trd->trd_processing_task = 0;
- cfs_complete(&trd->trd_finishing);
+ complete(&trd->trd_finishing);
OBD_FREE_PTR(thread);
OBD_FREE_PTR(env);
struct target_recovery_data *trd = &obd->obd_recovery_data;
memset(trd, 0, sizeof(*trd));
- cfs_init_completion(&trd->trd_starting);
- cfs_init_completion(&trd->trd_finishing);
+ init_completion(&trd->trd_starting);
+ init_completion(&trd->trd_finishing);
trd->trd_recovery_handler = handler;
if (cfs_create_thread(target_recovery_thread, lut, 0) > 0) {
- cfs_wait_for_completion(&trd->trd_starting);
+ wait_for_completion(&trd->trd_starting);
LASSERT(obd->obd_recovering != 0);
} else
rc = -ECHILD;
void target_stop_recovery_thread(struct obd_device *obd)
{
- if (obd->obd_recovery_data.trd_processing_task > 0) {
- struct target_recovery_data *trd = &obd->obd_recovery_data;
- /** recovery can be done but postrecovery is not yet */
- cfs_spin_lock(&obd->obd_dev_lock);
- if (obd->obd_recovering) {
- CERROR("%s: Aborting recovery\n", obd->obd_name);
- obd->obd_abort_recovery = 1;
- cfs_waitq_signal(&obd->obd_next_transno_waitq);
- }
- cfs_spin_unlock(&obd->obd_dev_lock);
- cfs_wait_for_completion(&trd->trd_finishing);
- }
+ if (obd->obd_recovery_data.trd_processing_task > 0) {
+ struct target_recovery_data *trd = &obd->obd_recovery_data;
+ /** recovery may be done but post-recovery is not yet complete */
+ spin_lock(&obd->obd_dev_lock);
+ if (obd->obd_recovering) {
+ CERROR("%s: Aborting recovery\n", obd->obd_name);
+ obd->obd_abort_recovery = 1;
+ cfs_waitq_signal(&obd->obd_next_transno_waitq);
+ }
+ spin_unlock(&obd->obd_dev_lock);
+ wait_for_completion(&trd->trd_finishing);
+ }
}
EXPORT_SYMBOL(target_stop_recovery_thread);
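
The recovery-thread start/stop code above relies on the completion API this patch switches to: init_completion(), complete(), wait_for_completion(). A minimal, self-contained sketch of the same start/stop handshake, with hypothetical foo_svc names and kthread_run() standing in for thread creation (the patch itself still uses cfs_create_thread()):

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kthread.h>
#include <linux/sched.h>

struct foo_svc {
	struct completion	fs_starting;
	struct completion	fs_finishing;
	int			fs_stop;	/* simplified: no locking here */
};

static int foo_svc_main(void *arg)
{
	struct foo_svc *svc = arg;

	complete(&svc->fs_starting);		/* was cfs_complete() */
	while (!svc->fs_stop)
		schedule_timeout_interruptible(HZ);
	complete(&svc->fs_finishing);
	return 0;
}

static int foo_svc_start(struct foo_svc *svc)
{
	init_completion(&svc->fs_starting);	/* was cfs_init_completion() */
	init_completion(&svc->fs_finishing);
	svc->fs_stop = 0;
	if (IS_ERR(kthread_run(foo_svc_main, svc, "foo_svc")))
		return -ECHILD;
	/* do not return until the thread has actually started */
	wait_for_completion(&svc->fs_starting);	/* was cfs_wait_for_completion() */
	return 0;
}

static void foo_svc_stop(struct foo_svc *svc)
{
	svc->fs_stop = 1;
	wait_for_completion(&svc->fs_finishing);
}
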
static int target_process_req_flags(struct obd_device *obd,
struct ptlrpc_request *req)
{
- struct obd_export *exp = req->rq_export;
- LASSERT(exp != NULL);
- if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
- /* client declares he's ready to replay locks */
- cfs_spin_lock(&exp->exp_lock);
- if (exp->exp_req_replay_needed) {
- exp->exp_req_replay_needed = 0;
- cfs_spin_unlock(&exp->exp_lock);
-
- LASSERT_ATOMIC_POS(&obd->obd_req_replay_clients);
- cfs_atomic_dec(&obd->obd_req_replay_clients);
- } else {
- cfs_spin_unlock(&exp->exp_lock);
- }
- }
- if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LOCK_REPLAY_DONE) {
- /* client declares he's ready to complete recovery
- * so, we put the request on th final queue */
- cfs_spin_lock(&exp->exp_lock);
- if (exp->exp_lock_replay_needed) {
- exp->exp_lock_replay_needed = 0;
- cfs_spin_unlock(&exp->exp_lock);
-
- LASSERT_ATOMIC_POS(&obd->obd_lock_replay_clients);
- cfs_atomic_dec(&obd->obd_lock_replay_clients);
- } else {
- cfs_spin_unlock(&exp->exp_lock);
- }
- }
- return 0;
+ struct obd_export *exp = req->rq_export;
+ LASSERT(exp != NULL);
+ if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
+ /* client declares he's ready to replay locks */
+ spin_lock(&exp->exp_lock);
+ if (exp->exp_req_replay_needed) {
+ exp->exp_req_replay_needed = 0;
+ spin_unlock(&exp->exp_lock);
+
+ LASSERT_ATOMIC_POS(&obd->obd_req_replay_clients);
+ cfs_atomic_dec(&obd->obd_req_replay_clients);
+ } else {
+ spin_unlock(&exp->exp_lock);
+ }
+ }
+ if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LOCK_REPLAY_DONE) {
+ /* client declares he's ready to complete recovery
+ * so, we put the request on the final queue */
+ spin_lock(&exp->exp_lock);
+ if (exp->exp_lock_replay_needed) {
+ exp->exp_lock_replay_needed = 0;
+ spin_unlock(&exp->exp_lock);
+
+ LASSERT_ATOMIC_POS(&obd->obd_lock_replay_clients);
+ cfs_atomic_dec(&obd->obd_lock_replay_clients);
+ } else {
+ spin_unlock(&exp->exp_lock);
+ }
+ }
+ return 0;
}
int target_queue_recovery_request(struct ptlrpc_request *req,
target_request_copy_get(req);
DEBUG_REQ(D_HA, req, "queue final req");
cfs_waitq_signal(&obd->obd_next_transno_waitq);
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- if (obd->obd_recovering) {
- cfs_list_add_tail(&req->rq_list,
- &obd->obd_final_req_queue);
- } else {
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- target_request_copy_put(req);
- RETURN(obd->obd_stopping ? -ENOTCONN : 1);
- }
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- RETURN(0);
- }
- if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
- /* client declares he's ready to replay locks */
- target_request_copy_get(req);
- DEBUG_REQ(D_HA, req, "queue lock replay req");
- cfs_waitq_signal(&obd->obd_next_transno_waitq);
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- LASSERT(obd->obd_recovering);
- /* usually due to recovery abort */
- if (!req->rq_export->exp_in_recovery) {
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- target_request_copy_put(req);
- RETURN(-ENOTCONN);
- }
- LASSERT(req->rq_export->exp_lock_replay_needed);
- cfs_list_add_tail(&req->rq_list, &obd->obd_lock_replay_queue);
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- RETURN(0);
- }
+ spin_lock(&obd->obd_recovery_task_lock);
+ if (obd->obd_recovering) {
+ cfs_list_add_tail(&req->rq_list,
+ &obd->obd_final_req_queue);
+ } else {
+ spin_unlock(&obd->obd_recovery_task_lock);
+ target_request_copy_put(req);
+ RETURN(obd->obd_stopping ? -ENOTCONN : 1);
+ }
+ spin_unlock(&obd->obd_recovery_task_lock);
+ RETURN(0);
+ }
+ if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
+ /* client declares he's ready to replay locks */
+ target_request_copy_get(req);
+ DEBUG_REQ(D_HA, req, "queue lock replay req");
+ cfs_waitq_signal(&obd->obd_next_transno_waitq);
+ spin_lock(&obd->obd_recovery_task_lock);
+ LASSERT(obd->obd_recovering);
+ /* usually due to recovery abort */
+ if (!req->rq_export->exp_in_recovery) {
+ spin_unlock(&obd->obd_recovery_task_lock);
+ target_request_copy_put(req);
+ RETURN(-ENOTCONN);
+ }
+ LASSERT(req->rq_export->exp_lock_replay_needed);
+ cfs_list_add_tail(&req->rq_list, &obd->obd_lock_replay_queue);
+ spin_unlock(&obd->obd_recovery_task_lock);
+ RETURN(0);
+ }
/* CAVEAT EMPTOR: The incoming request message has been swabbed
* (i.e. buflens etc are in my own byte order), but type-dependent
CDEBUG(D_HA, "Next recovery transno: "LPU64
", current: "LPU64", replaying\n",
obd->obd_next_recovery_transno, transno);
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- if (transno < obd->obd_next_recovery_transno) {
- /* Processing the queue right now, don't re-add. */
- LASSERT(cfs_list_empty(&req->rq_list));
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- RETURN(1);
- }
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ spin_lock(&obd->obd_recovery_task_lock);
+ if (transno < obd->obd_next_recovery_transno) {
+ /* Processing the queue right now, don't re-add. */
+ LASSERT(cfs_list_empty(&req->rq_list));
+ spin_unlock(&obd->obd_recovery_task_lock);
+ RETURN(1);
+ }
+ spin_unlock(&obd->obd_recovery_task_lock);
if (OBD_FAIL_CHECK(OBD_FAIL_TGT_REPLAY_DROP))
RETURN(0);
}
/* XXX O(n^2) */
- cfs_spin_lock(&obd->obd_recovery_task_lock);
+ spin_lock(&obd->obd_recovery_task_lock);
LASSERT(obd->obd_recovering);
cfs_list_for_each(tmp, &obd->obd_req_replay_queue) {
struct ptlrpc_request *reqiter =
transno)) {
DEBUG_REQ(D_ERROR, req, "dropping replay: transno "
"has been claimed by another client");
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
+ spin_unlock(&obd->obd_recovery_task_lock);
target_exp_dequeue_req_replay(req);
target_request_copy_put(req);
RETURN(0);
cfs_list_add_tail(&req->rq_list, &obd->obd_req_replay_queue);
obd->obd_requests_queued_for_recovery++;
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- cfs_waitq_signal(&obd->obd_next_transno_waitq);
- RETURN(0);
+ spin_unlock(&obd->obd_recovery_task_lock);
+ cfs_waitq_signal(&obd->obd_next_transno_waitq);
+ RETURN(0);
}
EXPORT_SYMBOL(target_queue_recovery_request);
*/
obd = req->rq_export->exp_obd;
- cfs_read_lock(&obd->obd_pool_lock);
+ read_lock(&obd->obd_pool_lock);
lustre_msg_set_slv(req->rq_repmsg, obd->obd_pool_slv);
lustre_msg_set_limit(req->rq_repmsg, obd->obd_pool_limit);
- cfs_read_unlock(&obd->obd_pool_lock);
+ read_unlock(&obd->obd_pool_lock);
RETURN(0);
}
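
The obd_pool_lock accesses above show the reader/writer lock conversion: cfs_read_lock()/cfs_write_lock() become the stock rwlock API. A small sketch under a hypothetical foo_pool structure (not part of the patch):

#include <linux/spinlock.h>	/* rwlock_t, read_lock(), write_lock() */
#include <linux/types.h>

struct foo_pool {
	rwlock_t	fp_lock;	/* protects fp_slv and fp_limit */
	u64		fp_slv;
	u32		fp_limit;
};

static void foo_pool_init(struct foo_pool *fp)
{
	rwlock_init(&fp->fp_lock);
	fp->fp_slv = 0;
	fp->fp_limit = 0;
}

static u64 foo_pool_read_slv(struct foo_pool *fp)
{
	u64 slv;

	read_lock(&fp->fp_lock);	/* was cfs_read_lock() */
	slv = fp->fp_slv;
	read_unlock(&fp->fp_lock);	/* was cfs_read_unlock() */
	return slv;
}

static void foo_pool_update(struct foo_pool *fp, u64 slv, u32 limit)
{
	write_lock(&fp->fp_lock);	/* was cfs_write_lock() */
	fp->fp_slv = slv;
	fp->fp_limit = limit;
	write_unlock(&fp->fp_lock);	/* was cfs_write_unlock() */
}

Readers may hold the lock concurrently; only the writer path excludes everyone else.
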
rs->rs_export = exp;
rs->rs_opc = lustre_msg_get_opc(req->rq_reqmsg);
- cfs_spin_lock(&exp->exp_uncommitted_replies_lock);
- CDEBUG(D_NET, "rs transno = "LPU64", last committed = "LPU64"\n",
- rs->rs_transno, exp->exp_last_committed);
- if (rs->rs_transno > exp->exp_last_committed) {
- /* not committed already */
- cfs_list_add_tail(&rs->rs_obd_list,
- &exp->exp_uncommitted_replies);
- }
- cfs_spin_unlock (&exp->exp_uncommitted_replies_lock);
+ spin_lock(&exp->exp_uncommitted_replies_lock);
+ CDEBUG(D_NET, "rs transno = "LPU64", last committed = "LPU64"\n",
+ rs->rs_transno, exp->exp_last_committed);
+ if (rs->rs_transno > exp->exp_last_committed) {
+ /* not committed already */
+ cfs_list_add_tail(&rs->rs_obd_list,
+ &exp->exp_uncommitted_replies);
+ }
+ spin_unlock(&exp->exp_uncommitted_replies_lock);
- cfs_spin_lock(&exp->exp_lock);
- cfs_list_add_tail(&rs->rs_exp_list, &exp->exp_outstanding_replies);
- cfs_spin_unlock(&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
+ cfs_list_add_tail(&rs->rs_exp_list, &exp->exp_outstanding_replies);
+ spin_unlock(&exp->exp_lock);
- netrc = target_send_reply_msg (req, rc, fail_id);
+ netrc = target_send_reply_msg(req, rc, fail_id);
- cfs_spin_lock(&svcpt->scp_rep_lock);
+ spin_lock(&svcpt->scp_rep_lock);
cfs_atomic_inc(&svcpt->scp_nreps_difficult);
ptlrpc_rs_addref(rs);
}
- cfs_spin_lock(&rs->rs_lock);
- if (rs->rs_transno <= exp->exp_last_committed ||
- (!rs->rs_on_net && !rs->rs_no_ack) ||
- cfs_list_empty(&rs->rs_exp_list) || /* completed already */
- cfs_list_empty(&rs->rs_obd_list)) {
- CDEBUG(D_HA, "Schedule reply immediately\n");
- ptlrpc_dispatch_difficult_reply(rs);
- } else {
+ spin_lock(&rs->rs_lock);
+ if (rs->rs_transno <= exp->exp_last_committed ||
+ (!rs->rs_on_net && !rs->rs_no_ack) ||
+ cfs_list_empty(&rs->rs_exp_list) || /* completed already */
+ cfs_list_empty(&rs->rs_obd_list)) {
+ CDEBUG(D_HA, "Schedule reply immediately\n");
+ ptlrpc_dispatch_difficult_reply(rs);
+ } else {
cfs_list_add(&rs->rs_list, &svcpt->scp_rep_active);
rs->rs_scheduled = 0; /* allow notifier to schedule */
}
- cfs_spin_unlock(&rs->rs_lock);
- cfs_spin_unlock(&svcpt->scp_rep_lock);
+ spin_unlock(&rs->rs_lock);
+ spin_unlock(&svcpt->scp_rep_lock);
EXIT;
}
EXPORT_SYMBOL(target_send_reply);
#if LUSTRE_TRACKS_LOCK_EXP_REFS
void ldlm_dump_export_locks(struct obd_export *exp)
{
- cfs_spin_lock(&exp->exp_locks_list_guard);
- if (!cfs_list_empty(&exp->exp_locks_list)) {
- struct ldlm_lock *lock;
-
- CERROR("dumping locks for export %p,"
- "ignore if the unmount doesn't hang\n", exp);
- cfs_list_for_each_entry(lock, &exp->exp_locks_list, l_exp_refs_link)
- LDLM_ERROR(lock, "lock:");
- }
- cfs_spin_unlock(&exp->exp_locks_list_guard);
+ spin_lock(&exp->exp_locks_list_guard);
+ if (!cfs_list_empty(&exp->exp_locks_list)) {
+ struct ldlm_lock *lock;
+
+ CERROR("dumping locks for export %p,"
+ "ignore if the unmount doesn't hang\n", exp);
+ cfs_list_for_each_entry(lock, &exp->exp_locks_list,
+ l_exp_refs_link)
+ LDLM_ERROR(lock, "lock:");
+ }
+ spin_unlock(&exp->exp_locks_list_guard);
}
#endif
RETURN(0);
}
- cfs_spin_lock(&ns->ns_lock);
- rc = ldlm_lock_remove_from_lru_nolock(lock);
- cfs_spin_unlock(&ns->ns_lock);
- EXIT;
- return rc;
+ spin_lock(&ns->ns_lock);
+ rc = ldlm_lock_remove_from_lru_nolock(lock);
+ spin_unlock(&ns->ns_lock);
+ EXIT;
+ return rc;
}
void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
{
- struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+ struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
- ENTRY;
- cfs_spin_lock(&ns->ns_lock);
- ldlm_lock_add_to_lru_nolock(lock);
- cfs_spin_unlock(&ns->ns_lock);
- EXIT;
+ ENTRY;
+ spin_lock(&ns->ns_lock);
+ ldlm_lock_add_to_lru_nolock(lock);
+ spin_unlock(&ns->ns_lock);
+ EXIT;
}
void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
return;
}
- cfs_spin_lock(&ns->ns_lock);
- if (!cfs_list_empty(&lock->l_lru)) {
- ldlm_lock_remove_from_lru_nolock(lock);
- ldlm_lock_add_to_lru_nolock(lock);
- }
- cfs_spin_unlock(&ns->ns_lock);
- EXIT;
+ spin_lock(&ns->ns_lock);
+ if (!cfs_list_empty(&lock->l_lru)) {
+ ldlm_lock_remove_from_lru_nolock(lock);
+ ldlm_lock_add_to_lru_nolock(lock);
+ }
+ spin_unlock(&ns->ns_lock);
+ EXIT;
}
/* This used to have a 'strict' flag, which recovery would use to mark an
if (lock == NULL)
RETURN(NULL);
- cfs_spin_lock_init(&lock->l_lock);
+ spin_lock_init(&lock->l_lock);
lock->l_resource = resource;
lu_ref_add(&resource->lr_reference, "lock", lock);
* lock->l_lock, and are taken in the memory address order to avoid
* dead-locks.
*/
- cfs_spin_lock(&lock->l_lock);
+ spin_lock(&lock->l_lock);
oldres = lock->l_resource;
if (oldres < newres) {
lock_res(oldres);
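
The comment above notes that the two resource locks are taken in memory-address order to avoid deadlocks. A minimal illustration of that ordering rule with hypothetical foo_res resources (spin_lock_nested() annotates the inner acquisition so lockdep does not treat it as recursive locking; a != b is assumed):

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct foo_res {
	spinlock_t	fr_lock;
};

/* Take both locks in address order so two tasks locking the same pair
 * can never deadlock against each other. */
static void foo_res_lock_pair(struct foo_res *a, struct foo_res *b)
{
	struct foo_res *first = a < b ? a : b;
	struct foo_res *second = a < b ? b : a;

	spin_lock(&first->fr_lock);
	spin_lock_nested(&second->fr_lock, SINGLE_DEPTH_NESTING);
}

static void foo_res_unlock_pair(struct foo_res *a, struct foo_res *b)
{
	spin_unlock(&a->fr_lock);
	spin_unlock(&b->fr_lock);
}
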
extern cfs_mem_cache_t *ldlm_resource_slab;
extern cfs_mem_cache_t *ldlm_lock_slab;
-static cfs_mutex_t ldlm_ref_mutex;
+static struct mutex ldlm_ref_mutex;
static int ldlm_refcount;
struct ldlm_cb_async_args {
#define ELT_TERMINATE 2
struct ldlm_bl_pool {
- cfs_spinlock_t blp_lock;
+ spinlock_t blp_lock;
/*
* blp_prio_list is used for callbacks that should be handled
cfs_list_t blp_list;
cfs_waitq_t blp_waitq;
- cfs_completion_t blp_comp;
+ struct completion blp_comp;
cfs_atomic_t blp_num_threads;
cfs_atomic_t blp_busy_threads;
int blp_min_threads;
struct ldlm_lock *blwi_lock;
cfs_list_t blwi_head;
int blwi_count;
- cfs_completion_t blwi_comp;
+ struct completion blwi_comp;
int blwi_mode;
int blwi_mem_pressure;
};
#if defined(HAVE_SERVER_SUPPORT) && defined(__KERNEL__)
/* w_l_spinlock protects both waiting_locks_list and expired_lock_thread */
-static cfs_spinlock_t waiting_locks_spinlock; /* BH lock (timer) */
+static spinlock_t waiting_locks_spinlock; /* BH lock (timer) */
static cfs_list_t waiting_locks_list;
static cfs_timer_t waiting_locks_timer;
static inline int have_expired_locks(void)
{
- int need_to_run;
+ int need_to_run;
- ENTRY;
- cfs_spin_lock_bh(&waiting_locks_spinlock);
- need_to_run = !cfs_list_empty(&expired_lock_thread.elt_expired_locks);
- cfs_spin_unlock_bh(&waiting_locks_spinlock);
+ ENTRY;
+ spin_lock_bh(&waiting_locks_spinlock);
+ need_to_run = !cfs_list_empty(&expired_lock_thread.elt_expired_locks);
+ spin_unlock_bh(&waiting_locks_spinlock);
- RETURN(need_to_run);
+ RETURN(need_to_run);
}
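
waiting_locks_spinlock is documented above as a BH lock shared with the timer, which is why these hunks use the spin_lock_bh()/spin_unlock_bh() variants. A short sketch of why _bh is needed in process context, using a hypothetical foo_pending list that is not part of the patch:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

struct foo_item {
	struct list_head fi_chain;
};

static LIST_HEAD(foo_pending);
static DEFINE_SPINLOCK(foo_pending_lock);

/* Process context: disable bottom halves so the timer callback cannot
 * preempt us on this CPU while we hold the lock. */
static void foo_queue(struct foo_item *fi)
{
	spin_lock_bh(&foo_pending_lock);	/* was cfs_spin_lock_bh() */
	list_add_tail(&fi->fi_chain, &foo_pending);
	spin_unlock_bh(&foo_pending_lock);	/* was cfs_spin_unlock_bh() */
}

/* Timer (softirq) context: bottom halves are already disabled while the
 * callback runs, so a plain spin_lock() is sufficient here.  In real code
 * this would be registered with the timer API. */
static void foo_timer_cb(unsigned long unused)
{
	spin_lock(&foo_pending_lock);
	/* walk foo_pending and expire items */
	spin_unlock(&foo_pending_lock);
}
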
static int expired_lock_main(void *arg)
expired_lock_thread.elt_state == ELT_TERMINATE,
&lwi);
- cfs_spin_lock_bh(&waiting_locks_spinlock);
- if (expired_lock_thread.elt_dump) {
- struct libcfs_debug_msg_data msgdata = {
- .msg_file = __FILE__,
- .msg_fn = "waiting_locks_callback",
- .msg_line = expired_lock_thread.elt_dump };
- cfs_spin_unlock_bh(&waiting_locks_spinlock);
+ spin_lock_bh(&waiting_locks_spinlock);
+ if (expired_lock_thread.elt_dump) {
+ struct libcfs_debug_msg_data msgdata = {
+ .msg_file = __FILE__,
+ .msg_fn = "waiting_locks_callback",
+ .msg_line = expired_lock_thread.elt_dump };
+ spin_unlock_bh(&waiting_locks_spinlock);
- /* from waiting_locks_callback, but not in timer */
- libcfs_debug_dumplog();
- libcfs_run_lbug_upcall(&msgdata);
+ /* from waiting_locks_callback, but not in timer */
+ libcfs_debug_dumplog();
+ libcfs_run_lbug_upcall(&msgdata);
- cfs_spin_lock_bh(&waiting_locks_spinlock);
+ spin_lock_bh(&waiting_locks_spinlock);
expired_lock_thread.elt_dump = 0;
}
l_pending_chain);
if ((void *)lock < LP_POISON + CFS_PAGE_SIZE &&
(void *)lock >= LP_POISON) {
- cfs_spin_unlock_bh(&waiting_locks_spinlock);
+ spin_unlock_bh(&waiting_locks_spinlock);
CERROR("free lock on elt list %p\n", lock);
LBUG();
}
LDLM_LOCK_RELEASE(lock);
continue;
}
- export = class_export_lock_get(lock->l_export, lock);
- cfs_spin_unlock_bh(&waiting_locks_spinlock);
+ export = class_export_lock_get(lock->l_export, lock);
+ spin_unlock_bh(&waiting_locks_spinlock);
- do_dump++;
- class_fail_export(export);
- class_export_lock_put(export, lock);
+ do_dump++;
+ class_fail_export(export);
+ class_export_lock_put(export, lock);
- /* release extra ref grabbed by ldlm_add_waiting_lock()
- * or ldlm_failed_ast() */
- LDLM_LOCK_RELEASE(lock);
+ /* release extra ref grabbed by ldlm_add_waiting_lock()
+ * or ldlm_failed_ast() */
+ LDLM_LOCK_RELEASE(lock);
- cfs_spin_lock_bh(&waiting_locks_spinlock);
- }
- cfs_spin_unlock_bh(&waiting_locks_spinlock);
+ spin_lock_bh(&waiting_locks_spinlock);
+ }
+ spin_unlock_bh(&waiting_locks_spinlock);
if (do_dump && obd_dump_on_eviction) {
CERROR("dump the log upon eviction\n");
*/
static int ldlm_lock_busy(struct ldlm_lock *lock)
{
- struct ptlrpc_request *req;
- int match = 0;
- ENTRY;
+ struct ptlrpc_request *req;
+ int match = 0;
+ ENTRY;
- if (lock->l_export == NULL)
- return 0;
+ if (lock->l_export == NULL)
+ return 0;
- cfs_spin_lock_bh(&lock->l_export->exp_rpc_lock);
- cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
- rq_exp_list) {
- if (req->rq_ops->hpreq_lock_match) {
- match = req->rq_ops->hpreq_lock_match(req, lock);
- if (match)
- break;
- }
- }
- cfs_spin_unlock_bh(&lock->l_export->exp_rpc_lock);
- RETURN(match);
+ spin_lock_bh(&lock->l_export->exp_rpc_lock);
+ cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
+ rq_exp_list) {
+ if (req->rq_ops->hpreq_lock_match) {
+ match = req->rq_ops->hpreq_lock_match(req, lock);
+ if (match)
+ break;
+ }
+ }
+ spin_unlock_bh(&lock->l_export->exp_rpc_lock);
+ RETURN(match);
}
/* This is called from within a timer interrupt and cannot schedule */
struct ldlm_lock *lock;
int need_dump = 0;
- cfs_spin_lock_bh(&waiting_locks_spinlock);
+ spin_lock_bh(&waiting_locks_spinlock);
while (!cfs_list_empty(&waiting_locks_list)) {
lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
l_pending_chain);
LDLM_LOCK_GET(lock);
- cfs_spin_unlock_bh(&waiting_locks_spinlock);
- LDLM_DEBUG(lock, "prolong the busy lock");
- ldlm_refresh_waiting_lock(lock,
- ldlm_get_enq_timeout(lock));
- cfs_spin_lock_bh(&waiting_locks_spinlock);
+ spin_unlock_bh(&waiting_locks_spinlock);
+ LDLM_DEBUG(lock, "prolong the busy lock");
+ ldlm_refresh_waiting_lock(lock,
+ ldlm_get_enq_timeout(lock));
+ spin_lock_bh(&waiting_locks_spinlock);
if (!cont) {
LDLM_LOCK_RELEASE(lock);
timeout_rounded = (cfs_time_t)round_timeout(lock->l_callback_timeout);
cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
}
- cfs_spin_unlock_bh(&waiting_locks_spinlock);
+ spin_unlock_bh(&waiting_locks_spinlock);
}
/*
LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
- cfs_spin_lock_bh(&waiting_locks_spinlock);
- if (lock->l_destroyed) {
- static cfs_time_t next;
- cfs_spin_unlock_bh(&waiting_locks_spinlock);
+ spin_lock_bh(&waiting_locks_spinlock);
+ if (lock->l_destroyed) {
+ static cfs_time_t next;
+ spin_unlock_bh(&waiting_locks_spinlock);
LDLM_ERROR(lock, "not waiting on destroyed lock (bug 5653)");
if (cfs_time_after(cfs_time_current(), next)) {
next = cfs_time_shift(14400);
* waiting list */
LDLM_LOCK_GET(lock);
}
- cfs_spin_unlock_bh(&waiting_locks_spinlock);
+ spin_unlock_bh(&waiting_locks_spinlock);
- if (ret) {
- cfs_spin_lock_bh(&lock->l_export->exp_bl_list_lock);
- if (cfs_list_empty(&lock->l_exp_list))
- cfs_list_add(&lock->l_exp_list,
- &lock->l_export->exp_bl_list);
- cfs_spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
- }
+ if (ret) {
+ spin_lock_bh(&lock->l_export->exp_bl_list_lock);
+ if (cfs_list_empty(&lock->l_exp_list))
+ cfs_list_add(&lock->l_exp_list,
+ &lock->l_export->exp_bl_list);
+ spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
+ }
- LDLM_DEBUG(lock, "%sadding to wait list(timeout: %d, AT: %s)",
- ret == 0 ? "not re-" : "", timeout,
- AT_OFF ? "off" : "on");
- return ret;
+ LDLM_DEBUG(lock, "%sadding to wait list(timeout: %d, AT: %s)",
+ ret == 0 ? "not re-" : "", timeout,
+ AT_OFF ? "off" : "on");
+ return ret;
}
/*
return 0;
}
- cfs_spin_lock_bh(&waiting_locks_spinlock);
- ret = __ldlm_del_waiting_lock(lock);
- cfs_spin_unlock_bh(&waiting_locks_spinlock);
+ spin_lock_bh(&waiting_locks_spinlock);
+ ret = __ldlm_del_waiting_lock(lock);
+ spin_unlock_bh(&waiting_locks_spinlock);
- /* remove the lock out of export blocking list */
- cfs_spin_lock_bh(&lock->l_export->exp_bl_list_lock);
- cfs_list_del_init(&lock->l_exp_list);
- cfs_spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
+ /* remove the lock out of export blocking list */
+ spin_lock_bh(&lock->l_export->exp_bl_list_lock);
+ cfs_list_del_init(&lock->l_exp_list);
+ spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
if (ret) {
/* release lock ref if it has indeed been removed
*/
int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
{
- if (lock->l_export == NULL) {
- /* We don't have a "waiting locks list" on clients. */
- LDLM_DEBUG(lock, "client lock: no-op");
- return 0;
- }
+ if (lock->l_export == NULL) {
+ /* We don't have a "waiting locks list" on clients. */
+ LDLM_DEBUG(lock, "client lock: no-op");
+ return 0;
+ }
- cfs_spin_lock_bh(&waiting_locks_spinlock);
+ spin_lock_bh(&waiting_locks_spinlock);
- if (cfs_list_empty(&lock->l_pending_chain)) {
- cfs_spin_unlock_bh(&waiting_locks_spinlock);
- LDLM_DEBUG(lock, "wasn't waiting");
- return 0;
- }
+ if (cfs_list_empty(&lock->l_pending_chain)) {
+ spin_unlock_bh(&waiting_locks_spinlock);
+ LDLM_DEBUG(lock, "wasn't waiting");
+ return 0;
+ }
- /* we remove/add the lock to the waiting list, so no needs to
- * release/take a lock reference */
- __ldlm_del_waiting_lock(lock);
- __ldlm_add_waiting_lock(lock, timeout);
- cfs_spin_unlock_bh(&waiting_locks_spinlock);
+ /* we remove/add the lock to the waiting list, so there is no need to
+ * release/take a lock reference */
+ __ldlm_del_waiting_lock(lock);
+ __ldlm_add_waiting_lock(lock, timeout);
+ spin_unlock_bh(&waiting_locks_spinlock);
- LDLM_DEBUG(lock, "refreshed");
- return 1;
+ LDLM_DEBUG(lock, "refreshed");
+ return 1;
}
EXPORT_SYMBOL(ldlm_refresh_waiting_lock);
if (obd_dump_on_timeout)
libcfs_debug_dumplog();
#ifdef __KERNEL__
- cfs_spin_lock_bh(&waiting_locks_spinlock);
- if (__ldlm_del_waiting_lock(lock) == 0)
- /* the lock was not in any list, grab an extra ref before adding
- * the lock to the expired list */
- LDLM_LOCK_GET(lock);
- cfs_list_add(&lock->l_pending_chain,
- &expired_lock_thread.elt_expired_locks);
- cfs_waitq_signal(&expired_lock_thread.elt_waitq);
- cfs_spin_unlock_bh(&waiting_locks_spinlock);
+ spin_lock_bh(&waiting_locks_spinlock);
+ if (__ldlm_del_waiting_lock(lock) == 0)
+ /* the lock was not in any list, grab an extra ref before adding
+ * the lock to the expired list */
+ LDLM_LOCK_GET(lock);
+ cfs_list_add(&lock->l_pending_chain,
+ &expired_lock_thread.elt_expired_locks);
+ cfs_waitq_signal(&expired_lock_thread.elt_waitq);
+ spin_unlock_bh(&waiting_locks_spinlock);
#else
- class_fail_export(lock->l_export);
+ class_fail_export(lock->l_export);
#endif
}
RETURN_EXIT;
}
- cfs_spin_lock_bh(&lock->l_export->exp_rpc_lock);
+ spin_lock_bh(&lock->l_export->exp_rpc_lock);
cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
rq_exp_list) {
/* Do not process requests that were not yet added to there
req->rq_ops->hpreq_lock_match(req, lock))
ptlrpc_hpreq_reorder(req);
}
- cfs_spin_unlock_bh(&lock->l_export->exp_rpc_lock);
- EXIT;
+ spin_unlock_bh(&lock->l_export->exp_rpc_lock);
+ EXIT;
}
/*
#ifdef __KERNEL__
static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi, int mode)
{
- struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
- ENTRY;
+ struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
+ ENTRY;
- cfs_spin_lock(&blp->blp_lock);
- if (blwi->blwi_lock && blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
- /* add LDLM_FL_DISCARD_DATA requests to the priority list */
- cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
- } else {
- /* other blocking callbacks are added to the regular list */
- cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_list);
- }
- cfs_spin_unlock(&blp->blp_lock);
+ spin_lock(&blp->blp_lock);
+ if (blwi->blwi_lock &&
+ blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
+ /* add LDLM_FL_DISCARD_DATA requests to the priority list */
+ cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
+ } else {
+ /* other blocking callbacks are added to the regular list */
+ cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_list);
+ }
+ spin_unlock(&blp->blp_lock);
- cfs_waitq_signal(&blp->blp_waitq);
+ cfs_waitq_signal(&blp->blp_waitq);
- /* can not use blwi->blwi_mode as blwi could be already freed in
- LDLM_ASYNC mode */
- if (mode == LDLM_SYNC)
- cfs_wait_for_completion(&blwi->blwi_comp);
+ /* cannot use blwi->blwi_mode as blwi could already be freed in
+ LDLM_ASYNC mode */
+ if (mode == LDLM_SYNC)
+ wait_for_completion(&blwi->blwi_comp);
- RETURN(0);
+ RETURN(0);
}
static inline void init_blwi(struct ldlm_bl_work_item *blwi,
struct ldlm_lock *lock,
int mode)
{
- cfs_init_completion(&blwi->blwi_comp);
+ init_completion(&blwi->blwi_comp);
CFS_INIT_LIST_HEAD(&blwi->blwi_head);
if (cfs_memory_pressure_get())
#ifdef __KERNEL__
static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
{
- struct ldlm_bl_work_item *blwi = NULL;
- static unsigned int num_bl = 0;
+ struct ldlm_bl_work_item *blwi = NULL;
+ static unsigned int num_bl = 0;
- cfs_spin_lock(&blp->blp_lock);
+ spin_lock(&blp->blp_lock);
/* process a request from the blp_list at least every blp_num_threads */
if (!cfs_list_empty(&blp->blp_list) &&
(cfs_list_empty(&blp->blp_prio_list) || num_bl == 0))
num_bl = 0;
cfs_list_del(&blwi->blwi_entry);
}
- cfs_spin_unlock(&blp->blp_lock);
+ spin_unlock(&blp->blp_lock);
- return blwi;
+ return blwi;
}
/* This only contains temporary data until the thread starts */
struct ldlm_bl_thread_data {
- char bltd_name[CFS_CURPROC_COMM_MAX];
- struct ldlm_bl_pool *bltd_blp;
- cfs_completion_t bltd_comp;
- int bltd_num;
+ char bltd_name[CFS_CURPROC_COMM_MAX];
+ struct ldlm_bl_pool *bltd_blp;
+ struct completion bltd_comp;
+ int bltd_num;
};
static int ldlm_bl_thread_main(void *arg);
static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
{
- struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
- int rc;
+ struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
+ int rc;
- cfs_init_completion(&bltd.bltd_comp);
- rc = cfs_create_thread(ldlm_bl_thread_main, &bltd, 0);
- if (rc < 0) {
- CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %d\n",
- cfs_atomic_read(&blp->blp_num_threads), rc);
- return rc;
- }
- cfs_wait_for_completion(&bltd.bltd_comp);
+ init_completion(&bltd.bltd_comp);
+ rc = cfs_create_thread(ldlm_bl_thread_main, &bltd, 0);
+ if (rc < 0) {
+ CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %d\n",
+ cfs_atomic_read(&blp->blp_num_threads), rc);
+ return rc;
+ }
+ wait_for_completion(&bltd.bltd_comp);
- return 0;
+ return 0;
}
static int ldlm_bl_thread_main(void *arg)
"ldlm_bl_%02d", bltd->bltd_num);
cfs_daemonize(bltd->bltd_name);
- cfs_complete(&bltd->bltd_comp);
+ complete(&bltd->bltd_comp);
/* cannot use bltd after this, it is only on caller's stack */
}
if (blwi->blwi_mode == LDLM_ASYNC)
OBD_FREE(blwi, sizeof(*blwi));
else
- cfs_complete(&blwi->blwi_comp);
+ complete(&blwi->blwi_comp);
}
cfs_atomic_dec(&blp->blp_busy_threads);
cfs_atomic_dec(&blp->blp_num_threads);
- cfs_complete(&blp->blp_comp);
+ complete(&blp->blp_comp);
RETURN(0);
}
{
int rc = 0;
ENTRY;
- cfs_mutex_lock(&ldlm_ref_mutex);
+ mutex_lock(&ldlm_ref_mutex);
if (++ldlm_refcount == 1) {
rc = ldlm_setup();
if (rc)
ldlm_refcount--;
}
- cfs_mutex_unlock(&ldlm_ref_mutex);
+ mutex_unlock(&ldlm_ref_mutex);
RETURN(rc);
}
void ldlm_put_ref(void)
{
ENTRY;
- cfs_mutex_lock(&ldlm_ref_mutex);
+ mutex_lock(&ldlm_ref_mutex);
if (ldlm_refcount == 1) {
int rc = ldlm_cleanup();
if (rc)
} else {
ldlm_refcount--;
}
- cfs_mutex_unlock(&ldlm_ref_mutex);
+ mutex_unlock(&ldlm_ref_mutex);
EXIT;
}
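
ldlm_get_ref()/ldlm_put_ref() above use the mutex-protected refcount idiom that the cfs_mutex_* to mutex_* rename preserves: the first caller sets the subsystem up, the last caller tears it down. A self-contained sketch with hypothetical foo_* names:

#include <linux/mutex.h>

static DEFINE_MUTEX(foo_ref_mutex);	/* statically initialized mutex */
static int foo_refcount;

static int foo_setup(void) { return 0; }	/* hypothetical setup hook */
static void foo_cleanup(void) { }		/* hypothetical cleanup hook */

static int foo_get_ref(void)
{
	int rc = 0;

	mutex_lock(&foo_ref_mutex);		/* was cfs_mutex_lock() */
	if (++foo_refcount == 1) {
		rc = foo_setup();
		if (rc)
			foo_refcount--;
	}
	mutex_unlock(&foo_ref_mutex);		/* was cfs_mutex_unlock() */
	return rc;
}

static void foo_put_ref(void)
{
	mutex_lock(&foo_ref_mutex);
	if (--foo_refcount == 0)
		foo_cleanup();
	mutex_unlock(&foo_ref_mutex);
}
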
OBD_ALLOC(blp, sizeof(*blp));
if (blp == NULL)
GOTO(out, rc = -ENOMEM);
- ldlm_state->ldlm_bl_pool = blp;
+ ldlm_state->ldlm_bl_pool = blp;
- cfs_spin_lock_init(&blp->blp_lock);
+ spin_lock_init(&blp->blp_lock);
CFS_INIT_LIST_HEAD(&blp->blp_list);
CFS_INIT_LIST_HEAD(&blp->blp_prio_list);
cfs_waitq_init(&blp->blp_waitq);
cfs_waitq_init(&expired_lock_thread.elt_waitq);
CFS_INIT_LIST_HEAD(&waiting_locks_list);
- cfs_spin_lock_init(&waiting_locks_spinlock);
+ spin_lock_init(&waiting_locks_spinlock);
cfs_timer_init(&waiting_locks_timer, waiting_locks_callback, 0);
rc = cfs_create_thread(expired_lock_main, NULL, CFS_DAEMON_FLAGS);
while (cfs_atomic_read(&blp->blp_num_threads) > 0) {
struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
- cfs_init_completion(&blp->blp_comp);
+ init_completion(&blp->blp_comp);
- cfs_spin_lock(&blp->blp_lock);
+ spin_lock(&blp->blp_lock);
cfs_list_add_tail(&blwi.blwi_entry, &blp->blp_list);
cfs_waitq_signal(&blp->blp_waitq);
- cfs_spin_unlock(&blp->blp_lock);
+ spin_unlock(&blp->blp_lock);
- cfs_wait_for_completion(&blp->blp_comp);
+ wait_for_completion(&blp->blp_comp);
}
OBD_FREE(blp, sizeof(*blp));
int ldlm_init(void)
{
- cfs_mutex_init(&ldlm_ref_mutex);
- cfs_mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
- cfs_mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
+ mutex_init(&ldlm_ref_mutex);
+ mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
+ mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
ldlm_resource_slab = cfs_mem_cache_create("ldlm_resources",
sizeof(struct ldlm_resource), 0,
CFS_SLAB_HWCACHE_ALIGN);
*/
obd = ldlm_pl2ns(pl)->ns_obd;
LASSERT(obd != NULL);
- cfs_write_lock(&obd->obd_pool_lock);
+ write_lock(&obd->obd_pool_lock);
obd->obd_pool_slv = pl->pl_server_lock_volume;
- cfs_write_unlock(&obd->obd_pool_lock);
+ write_unlock(&obd->obd_pool_lock);
}
/**
if (recalc_interval_sec < pl->pl_recalc_period)
RETURN(0);
- cfs_spin_lock(&pl->pl_lock);
- recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
- if (recalc_interval_sec < pl->pl_recalc_period) {
- cfs_spin_unlock(&pl->pl_lock);
- RETURN(0);
- }
+ spin_lock(&pl->pl_lock);
+ recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ if (recalc_interval_sec < pl->pl_recalc_period) {
+ spin_unlock(&pl->pl_lock);
+ RETURN(0);
+ }
/*
* Recalc SLV after last period. This should be done
* _before_ recalculating new grant plan.
pl->pl_recalc_time = cfs_time_current_sec();
lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
recalc_interval_sec);
- cfs_spin_unlock(&pl->pl_lock);
- RETURN(0);
+ spin_unlock(&pl->pl_lock);
+ RETURN(0);
}
/**
if (cfs_atomic_read(&pl->pl_granted) == 0)
RETURN(0);
- cfs_spin_lock(&pl->pl_lock);
+ spin_lock(&pl->pl_lock);
/*
* We want shrinker to possibly cause cancellation of @nr locks from
* Make sure that pool informed obd of last SLV changes.
*/
ldlm_srv_pool_push_slv(pl);
- cfs_spin_unlock(&pl->pl_lock);
+ spin_unlock(&pl->pl_lock);
- /*
- * We did not really free any memory here so far, it only will be
- * freed later may be, so that we return 0 to not confuse VM.
- */
- return 0;
+ /*
+ * We have not really freed any memory here so far; it may only be
+ * freed later, so return 0 so as not to confuse the VM.
+ */
+ return 0;
}
/**
obd = ldlm_pl2ns(pl)->ns_obd;
LASSERT(obd != NULL && obd != LP_POISON);
LASSERT(obd->obd_type != LP_POISON);
- cfs_write_lock(&obd->obd_pool_lock);
+ write_lock(&obd->obd_pool_lock);
obd->obd_pool_limit = limit;
- cfs_write_unlock(&obd->obd_pool_lock);
+ write_unlock(&obd->obd_pool_lock);
ldlm_pool_set_limit(pl, limit);
return 0;
*/
obd = ldlm_pl2ns(pl)->ns_obd;
LASSERT(obd != NULL);
- cfs_read_lock(&obd->obd_pool_lock);
+ read_lock(&obd->obd_pool_lock);
pl->pl_server_lock_volume = obd->obd_pool_slv;
ldlm_pool_set_limit(pl, obd->obd_pool_limit);
- cfs_read_unlock(&obd->obd_pool_lock);
+ read_unlock(&obd->obd_pool_lock);
}
/**
if (recalc_interval_sec < pl->pl_recalc_period)
RETURN(0);
- cfs_spin_lock(&pl->pl_lock);
- /*
- * Check if we need to recalc lists now.
- */
- recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
- if (recalc_interval_sec < pl->pl_recalc_period) {
- cfs_spin_unlock(&pl->pl_lock);
+ spin_lock(&pl->pl_lock);
+ /*
+ * Check if we need to recalc lists now.
+ */
+ recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+ if (recalc_interval_sec < pl->pl_recalc_period) {
+ spin_unlock(&pl->pl_lock);
RETURN(0);
}
pl->pl_recalc_time = cfs_time_current_sec();
lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
recalc_interval_sec);
- cfs_spin_unlock(&pl->pl_lock);
+ spin_unlock(&pl->pl_lock);
/*
* Do not cancel locks in case lru resize is disabled for this ns.
*/
ldlm_cli_pool_pop_slv(pl);
- cfs_spin_lock(&ns->ns_lock);
- unused = ns->ns_nr_unused;
- cfs_spin_unlock(&ns->ns_lock);
-
+ spin_lock(&ns->ns_lock);
+ unused = ns->ns_nr_unused;
+ spin_unlock(&ns->ns_lock);
+
if (nr) {
canceled = ldlm_cancel_lru(ns, nr, LDLM_ASYNC,
LDLM_CANCEL_SHRINK);
if (recalc_interval_sec <= 0)
goto recalc;
- cfs_spin_lock(&pl->pl_lock);
+ spin_lock(&pl->pl_lock);
recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
if (recalc_interval_sec > 0) {
/*
cfs_atomic_set(&pl->pl_grant_rate, 0);
cfs_atomic_set(&pl->pl_cancel_rate, 0);
}
- cfs_spin_unlock(&pl->pl_lock);
+ spin_unlock(&pl->pl_lock);
recalc:
if (pl->pl_ops->po_recalc != NULL) {
__u64 slv, clv;
__u32 limit;
- cfs_spin_lock(&pl->pl_lock);
+ spin_lock(&pl->pl_lock);
slv = pl->pl_server_lock_volume;
clv = pl->pl_client_lock_volume;
limit = ldlm_pool_get_limit(pl);
grant_speed = grant_rate - cancel_rate;
lvf = cfs_atomic_read(&pl->pl_lock_volume_factor);
grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
- cfs_spin_unlock(&pl->pl_lock);
+ spin_unlock(&pl->pl_lock);
nr += snprintf(page + nr, count - nr, "LDLM pool state (%s):\n",
pl->pl_name);
}
static int lprocfs_rd_grant_speed(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+ int count, int *eof, void *data)
{
- struct ldlm_pool *pl = data;
- int grant_speed;
-
- cfs_spin_lock(&pl->pl_lock);
- /* serialize with ldlm_pool_recalc */
- grant_speed = cfs_atomic_read(&pl->pl_grant_rate) -
- cfs_atomic_read(&pl->pl_cancel_rate);
- cfs_spin_unlock(&pl->pl_lock);
- return lprocfs_rd_uint(page, start, off, count, eof, &grant_speed);
+ struct ldlm_pool *pl = data;
+ int grant_speed;
+
+ spin_lock(&pl->pl_lock);
+ /* serialize with ldlm_pool_recalc */
+ grant_speed = cfs_atomic_read(&pl->pl_grant_rate) -
+ cfs_atomic_read(&pl->pl_cancel_rate);
+ spin_unlock(&pl->pl_lock);
+ return lprocfs_rd_uint(page, start, off, count, eof, &grant_speed);
}
LDLM_POOL_PROC_READER(grant_plan, int);
int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
int idx, ldlm_side_t client)
{
- int rc;
- ENTRY;
+ int rc;
+ ENTRY;
- cfs_spin_lock_init(&pl->pl_lock);
+ spin_lock_init(&pl->pl_lock);
cfs_atomic_set(&pl->pl_granted, 0);
pl->pl_recalc_time = cfs_time_current_sec();
cfs_atomic_set(&pl->pl_lock_volume_factor, 1);
*/
__u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
{
- __u64 slv;
- cfs_spin_lock(&pl->pl_lock);
- slv = pl->pl_server_lock_volume;
- cfs_spin_unlock(&pl->pl_lock);
- return slv;
+ __u64 slv;
+ spin_lock(&pl->pl_lock);
+ slv = pl->pl_server_lock_volume;
+ spin_unlock(&pl->pl_lock);
+ return slv;
}
EXPORT_SYMBOL(ldlm_pool_get_slv);
*/
void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
{
- cfs_spin_lock(&pl->pl_lock);
- pl->pl_server_lock_volume = slv;
- cfs_spin_unlock(&pl->pl_lock);
+ spin_lock(&pl->pl_lock);
+ pl->pl_server_lock_volume = slv;
+ spin_unlock(&pl->pl_lock);
}
EXPORT_SYMBOL(ldlm_pool_set_slv);
*/
__u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
{
- __u64 slv;
- cfs_spin_lock(&pl->pl_lock);
- slv = pl->pl_client_lock_volume;
- cfs_spin_unlock(&pl->pl_lock);
- return slv;
+ __u64 slv;
+ spin_lock(&pl->pl_lock);
+ slv = pl->pl_client_lock_volume;
+ spin_unlock(&pl->pl_lock);
+ return slv;
}
EXPORT_SYMBOL(ldlm_pool_get_clv);
*/
void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
{
- cfs_spin_lock(&pl->pl_lock);
- pl->pl_client_lock_volume = clv;
- cfs_spin_unlock(&pl->pl_lock);
+ spin_lock(&pl->pl_lock);
+ pl->pl_client_lock_volume = clv;
+ spin_unlock(&pl->pl_lock);
}
EXPORT_SYMBOL(ldlm_pool_set_clv);
static struct ptlrpc_thread *ldlm_pools_thread;
static struct cfs_shrinker *ldlm_pools_srv_shrinker;
static struct cfs_shrinker *ldlm_pools_cli_shrinker;
-static cfs_completion_t ldlm_pools_comp;
+static struct completion ldlm_pools_comp;
/*
* Cancel \a nr locks from all namespaces (if possible). Returns number of
for (nr_ns = cfs_atomic_read(ldlm_namespace_nr(client));
nr_ns > 0; nr_ns--)
{
- cfs_mutex_lock(ldlm_namespace_lock(client));
+ mutex_lock(ldlm_namespace_lock(client));
if (cfs_list_empty(ldlm_namespace_list(client))) {
- cfs_mutex_unlock(ldlm_namespace_lock(client));
+ mutex_unlock(ldlm_namespace_lock(client));
cl_env_reexit(cookie);
return 0;
}
ns = ldlm_namespace_first_locked(client);
ldlm_namespace_get(ns);
ldlm_namespace_move_locked(ns, client);
- cfs_mutex_unlock(ldlm_namespace_lock(client));
+ mutex_unlock(ldlm_namespace_lock(client));
total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
ldlm_namespace_put(ns);
}
/*
* Do not call shrink under ldlm_namespace_lock(client)
*/
- cfs_mutex_lock(ldlm_namespace_lock(client));
+ mutex_lock(ldlm_namespace_lock(client));
if (cfs_list_empty(ldlm_namespace_list(client))) {
- cfs_mutex_unlock(ldlm_namespace_lock(client));
+ mutex_unlock(ldlm_namespace_lock(client));
/*
* If list is empty, we can't return any @cached > 0,
* that probably would cause needless shrinker
ns = ldlm_namespace_first_locked(client);
ldlm_namespace_get(ns);
ldlm_namespace_move_locked(ns, client);
- cfs_mutex_unlock(ldlm_namespace_lock(client));
+ mutex_unlock(ldlm_namespace_lock(client));
nr_locks = ldlm_pool_granted(&ns->ns_pool);
cancel = 1 + nr_locks * nr / total;
/*
* Check all modest namespaces first.
*/
- cfs_mutex_lock(ldlm_namespace_lock(client));
+ mutex_lock(ldlm_namespace_lock(client));
cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
ns_list_chain)
{
}
ldlm_pool_setup(&ns->ns_pool, l);
}
- cfs_mutex_unlock(ldlm_namespace_lock(client));
+ mutex_unlock(ldlm_namespace_lock(client));
}
/*
* rid of potential deadlock on client nodes when canceling
* locks synchronously.
*/
- cfs_mutex_lock(ldlm_namespace_lock(client));
- if (cfs_list_empty(ldlm_namespace_list(client))) {
- cfs_mutex_unlock(ldlm_namespace_lock(client));
- break;
- }
- ns = ldlm_namespace_first_locked(client);
-
- cfs_spin_lock(&ns->ns_lock);
- /*
- * skip ns which is being freed, and we don't want to increase
- * its refcount again, not even temporarily. bz21519 & LU-499.
- */
- if (ns->ns_stopping) {
- skip = 1;
- } else {
- skip = 0;
- ldlm_namespace_get(ns);
- }
- cfs_spin_unlock(&ns->ns_lock);
-
- ldlm_namespace_move_locked(ns, client);
- cfs_mutex_unlock(ldlm_namespace_lock(client));
+ mutex_lock(ldlm_namespace_lock(client));
+ if (cfs_list_empty(ldlm_namespace_list(client))) {
+ mutex_unlock(ldlm_namespace_lock(client));
+ break;
+ }
+ ns = ldlm_namespace_first_locked(client);
+
+ spin_lock(&ns->ns_lock);
+ /*
+ * skip ns which is being freed, and we don't want to increase
+ * its refcount again, not even temporarily. bz21519 & LU-499.
+ */
+ if (ns->ns_stopping) {
+ skip = 1;
+ } else {
+ skip = 0;
+ ldlm_namespace_get(ns);
+ }
+ spin_unlock(&ns->ns_lock);
+
+ ldlm_namespace_move_locked(ns, client);
+ mutex_unlock(ldlm_namespace_lock(client));
/*
* After setup is done - recalc the pool.
CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
t_name, cfs_curproc_pid());
- cfs_complete_and_exit(&ldlm_pools_comp, 0);
+ complete_and_exit(&ldlm_pools_comp, 0);
}
static int ldlm_pools_thread_start(void)
if (ldlm_pools_thread == NULL)
RETURN(-ENOMEM);
- cfs_init_completion(&ldlm_pools_comp);
+ init_completion(&ldlm_pools_comp);
cfs_waitq_init(&ldlm_pools_thread->t_ctl_waitq);
/*
* This fixes possible race and oops due to accessing freed memory
* in pools thread.
*/
- cfs_wait_for_completion(&ldlm_pools_comp);
+ wait_for_completion(&ldlm_pools_comp);
OBD_FREE_PTR(ldlm_pools_thread);
ldlm_pools_thread = NULL;
EXIT;
interrupted_completion_wait, &lwd);
}
- if (imp != NULL) {
- cfs_spin_lock(&imp->imp_lock);
- lwd.lwd_conn_cnt = imp->imp_conn_cnt;
- cfs_spin_unlock(&imp->imp_lock);
- }
+ if (imp != NULL) {
+ spin_lock(&imp->imp_lock);
+ lwd.lwd_conn_cnt = imp->imp_conn_cnt;
+ spin_unlock(&imp->imp_lock);
+ }
if (ns_is_client(ldlm_lock_to_ns(lock)) &&
OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
* alive in cleanup time. Evil races are possible which may cause
* oops in that time.
*/
- cfs_write_lock(&obd->obd_pool_lock);
+ write_lock(&obd->obd_pool_lock);
obd->obd_pool_slv = new_slv;
obd->obd_pool_limit = new_limit;
- cfs_write_unlock(&obd->obd_pool_lock);
+ write_unlock(&obd->obd_pool_lock);
RETURN(0);
}
static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
int count, int max, int flags)
{
- ldlm_cancel_lru_policy_t pf;
- struct ldlm_lock *lock, *next;
- int added = 0, unused, remained;
- ENTRY;
+ ldlm_cancel_lru_policy_t pf;
+ struct ldlm_lock *lock, *next;
+ int added = 0, unused, remained;
+ ENTRY;
- cfs_spin_lock(&ns->ns_lock);
+ spin_lock(&ns->ns_lock);
unused = ns->ns_nr_unused;
remained = unused;
break;
LDLM_LOCK_GET(lock);
- cfs_spin_unlock(&ns->ns_lock);
+ spin_unlock(&ns->ns_lock);
lu_ref_add(&lock->l_reference, __FUNCTION__, cfs_current());
/* Pass the lock through the policy filter and see if it
lu_ref_del(&lock->l_reference,
__FUNCTION__, cfs_current());
LDLM_LOCK_RELEASE(lock);
- cfs_spin_lock(&ns->ns_lock);
- break;
- }
- if (result == LDLM_POLICY_SKIP_LOCK) {
- lu_ref_del(&lock->l_reference,
- __FUNCTION__, cfs_current());
- LDLM_LOCK_RELEASE(lock);
- cfs_spin_lock(&ns->ns_lock);
+ spin_lock(&ns->ns_lock);
+ break;
+ }
+ if (result == LDLM_POLICY_SKIP_LOCK) {
+ lu_ref_del(&lock->l_reference,
+ __func__, cfs_current());
+ LDLM_LOCK_RELEASE(lock);
+ spin_lock(&ns->ns_lock);
continue;
}
lu_ref_del(&lock->l_reference,
__FUNCTION__, cfs_current());
LDLM_LOCK_RELEASE(lock);
- cfs_spin_lock(&ns->ns_lock);
+ spin_lock(&ns->ns_lock);
continue;
}
LASSERT(!lock->l_readers && !lock->l_writers);
cfs_list_add(&lock->l_bl_ast, cancels);
unlock_res_and_lock(lock);
lu_ref_del(&lock->l_reference, __FUNCTION__, cfs_current());
- cfs_spin_lock(&ns->ns_lock);
- added++;
- unused--;
- }
- cfs_spin_unlock(&ns->ns_lock);
- RETURN(added);
+ spin_lock(&ns->ns_lock);
+ added++;
+ unused--;
+ }
+ spin_unlock(&ns->ns_lock);
+ RETURN(added);
}
int ldlm_cancel_lru_local(struct ldlm_namespace *ns, cfs_list_t *cancels,
cfs_atomic_t ldlm_srv_namespace_nr = CFS_ATOMIC_INIT(0);
cfs_atomic_t ldlm_cli_namespace_nr = CFS_ATOMIC_INIT(0);
-cfs_mutex_t ldlm_srv_namespace_lock;
+struct mutex ldlm_srv_namespace_lock;
CFS_LIST_HEAD(ldlm_srv_namespace_list);
-cfs_mutex_t ldlm_cli_namespace_lock;
+struct mutex ldlm_cli_namespace_lock;
CFS_LIST_HEAD(ldlm_cli_namespace_list);
cfs_proc_dir_entry_t *ldlm_type_proc_dir = NULL;
CFS_INIT_LIST_HEAD(&ns->ns_list_chain);
CFS_INIT_LIST_HEAD(&ns->ns_unused_list);
- cfs_spin_lock_init(&ns->ns_lock);
+ spin_lock_init(&ns->ns_lock);
cfs_atomic_set(&ns->ns_bref, 0);
cfs_waitq_init(&ns->ns_waitq);
return;
}
- cfs_spin_lock(&ns->ns_lock);
- ns->ns_stopping = 1;
- cfs_spin_unlock(&ns->ns_lock);
+ spin_lock(&ns->ns_lock);
+ ns->ns_stopping = 1;
+ spin_unlock(&ns->ns_lock);
/*
* Can fail with -EINTR when force == 0 in which case try harder.
void ldlm_namespace_put(struct ldlm_namespace *ns)
{
- if (cfs_atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
- cfs_waitq_signal(&ns->ns_waitq);
- cfs_spin_unlock(&ns->ns_lock);
- }
+ if (cfs_atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
+ cfs_waitq_signal(&ns->ns_waitq);
+ spin_unlock(&ns->ns_lock);
+ }
}
EXPORT_SYMBOL(ldlm_namespace_put);
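
ldlm_namespace_put() above keeps its fast path lock-free: the spinlock is only taken when the reference count actually drops to zero, which is exactly what atomic_dec_and_lock() provides. A short sketch of the pattern with a hypothetical foo_ns (initialization omitted):

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct foo_ns {
	atomic_t		fn_bref;	/* busy references */
	spinlock_t		fn_lock;
	wait_queue_head_t	fn_waitq;	/* waiters for fn_bref == 0 */
};

/* atomic_dec_and_lock() returns true -- with fn_lock held -- only when
 * the decrement reaches zero, so ordinary puts never touch the lock and
 * the final put can wake any waiters under it. */
static void foo_ns_put(struct foo_ns *ns)
{
	if (atomic_dec_and_lock(&ns->fn_bref, &ns->fn_lock)) {
		wake_up(&ns->fn_waitq);
		spin_unlock(&ns->fn_lock);
	}
}
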
/* Register @ns in the list of namespaces */
void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
{
- cfs_mutex_lock(ldlm_namespace_lock(client));
- LASSERT(cfs_list_empty(&ns->ns_list_chain));
- cfs_list_add(&ns->ns_list_chain, ldlm_namespace_list(client));
- cfs_atomic_inc(ldlm_namespace_nr(client));
- cfs_mutex_unlock(ldlm_namespace_lock(client));
+ mutex_lock(ldlm_namespace_lock(client));
+ LASSERT(cfs_list_empty(&ns->ns_list_chain));
+ cfs_list_add(&ns->ns_list_chain, ldlm_namespace_list(client));
+ cfs_atomic_inc(ldlm_namespace_nr(client));
+ mutex_unlock(ldlm_namespace_lock(client));
}
/* Unregister @ns from the list of namespaces */
void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
{
- cfs_mutex_lock(ldlm_namespace_lock(client));
- LASSERT(!cfs_list_empty(&ns->ns_list_chain));
- /*
- * Some asserts and possibly other parts of code still using
- * list_empty(&ns->ns_list_chain). This is why it is important
- * to use list_del_init() here.
- */
- cfs_list_del_init(&ns->ns_list_chain);
- cfs_atomic_dec(ldlm_namespace_nr(client));
- cfs_mutex_unlock(ldlm_namespace_lock(client));
+ mutex_lock(ldlm_namespace_lock(client));
+ LASSERT(!cfs_list_empty(&ns->ns_list_chain));
+ /*
+ * Some asserts and possibly other parts of code still using
+ * list_empty(&ns->ns_list_chain). This is why it is important
+ * to use list_del_init() here.
+ */
+ cfs_list_del_init(&ns->ns_list_chain);
+ cfs_atomic_dec(ldlm_namespace_nr(client));
+ mutex_unlock(ldlm_namespace_lock(client));
}
/* Should be called under ldlm_namespace_lock(client) taken */
}
cfs_atomic_set(&res->lr_refcount, 1);
- cfs_spin_lock_init(&res->lr_lock);
- lu_ref_init(&res->lr_reference);
+ spin_lock_init(&res->lr_lock);
+ lu_ref_init(&res->lr_reference);
- /* one who creates the resource must unlock
- * the mutex after lvb initialization */
- cfs_mutex_init(&res->lr_lvb_mutex);
- cfs_mutex_lock(&res->lr_lvb_mutex);
+ /* one who creates the resource must unlock
+ * the mutex after lvb initialization */
+ mutex_init(&res->lr_lvb_mutex);
+ mutex_lock(&res->lr_lvb_mutex);
- return res;
+ return res;
}
/* Args: unlocked namespace
res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
/* synchronize WRT resource creation */
if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
- cfs_mutex_lock(&res->lr_lvb_mutex);
- cfs_mutex_unlock(&res->lr_lvb_mutex);
+ mutex_lock(&res->lr_lvb_mutex);
+ mutex_unlock(&res->lr_lvb_mutex);
}
return res;
}
/* clean lu_ref for failed resource */
lu_ref_fini(&res->lr_reference);
/* We have taken lr_lvb_mutex. Drop it. */
- cfs_mutex_unlock(&res->lr_lvb_mutex);
+ mutex_unlock(&res->lr_lvb_mutex);
OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
/* synchronize WRT resource creation */
if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
- cfs_mutex_lock(&res->lr_lvb_mutex);
- cfs_mutex_unlock(&res->lr_lvb_mutex);
+ mutex_lock(&res->lr_lvb_mutex);
+ mutex_unlock(&res->lr_lvb_mutex);
}
return res;
}
}
/* we create resource with locked lr_lvb_mutex */
- cfs_mutex_unlock(&res->lr_lvb_mutex);
+ mutex_unlock(&res->lr_lvb_mutex);
return res;
}
if (!((libcfs_debug | D_ERROR) & level))
return;
- cfs_mutex_lock(ldlm_namespace_lock(client));
+ mutex_lock(ldlm_namespace_lock(client));
cfs_list_for_each(tmp, ldlm_namespace_list(client)) {
struct ldlm_namespace *ns;
ldlm_namespace_dump(level, ns);
}
- cfs_mutex_unlock(ldlm_namespace_lock(client));
+ mutex_unlock(ldlm_namespace_lock(client));
}
EXPORT_SYMBOL(ldlm_dump_all_namespaces);
cfs_hash_for_each_nolock(ns->ns_rs_hash,
ldlm_res_hash_dump,
(void *)(unsigned long)level);
- cfs_spin_lock(&ns->ns_lock);
- ns->ns_next_dump = cfs_time_shift(10);
- cfs_spin_unlock(&ns->ns_lock);
+ spin_lock(&ns->ns_lock);
+ ns->ns_next_dump = cfs_time_shift(10);
+ spin_unlock(&ns->ns_lock);
}
EXPORT_SYMBOL(ldlm_namespace_dump);
ibits = MDS_INODELOCK_LOOKUP;
if (!ll_have_md_lock(inode, &ibits, LCK_MINMODE))
goto do_lock;
- cfs_mutex_lock(&lli->lli_och_mutex);
+ mutex_lock(&lli->lli_och_mutex);
if (*och_p) { /* Everything is open already, do nothing */
/*(*och_usecount)++; Do not let them steal our open
handle from under us */
hope the lock won't be invalidated in between. But
if it would be, we'll reopen the open request to
MDS later during file open path */
- cfs_mutex_unlock(&lli->lli_och_mutex);
+ mutex_unlock(&lli->lli_och_mutex);
RETURN(1);
} else {
- cfs_mutex_unlock(&lli->lli_och_mutex);
+ mutex_unlock(&lli->lli_och_mutex);
}
}
}
ldlm_lock_dump_handle(D_OTHER, &lockh);
- cfs_mutex_lock(&lli->lli_readdir_mutex);
+ mutex_lock(&lli->lli_readdir_mutex);
page = ll_dir_page_locate(dir, &lhash, &start, &end);
if (IS_ERR(page)) {
CERROR("dir page locate: "DFID" at "LPU64": rc %ld\n",
goto fail;
}
out_unlock:
- cfs_mutex_unlock(&lli->lli_readdir_mutex);
+ mutex_unlock(&lli->lli_readdir_mutex);
ldlm_lock_decref(&lockh, mode);
return page;
loff_t ret = -EINVAL;
ENTRY;
- cfs_mutex_lock(&inode->i_mutex);
+ mutex_lock(&inode->i_mutex);
switch (origin) {
case SEEK_SET:
break;
GOTO(out, ret);
out:
- cfs_mutex_unlock(&inode->i_mutex);
+ mutex_unlock(&inode->i_mutex);
return ret;
}
och_usecount = &lli->lli_open_fd_read_count;
}
- cfs_mutex_lock(&lli->lli_och_mutex);
+ mutex_lock(&lli->lli_och_mutex);
if (*och_usecount) { /* There are still users of this handle, so
skip freeing it. */
- cfs_mutex_unlock(&lli->lli_och_mutex);
+ mutex_unlock(&lli->lli_och_mutex);
RETURN(0);
}
och=*och_p;
*och_p = NULL;
- cfs_mutex_unlock(&lli->lli_och_mutex);
+ mutex_unlock(&lli->lli_och_mutex);
if (och) { /* There might be a race and somebody have freed this och
already */
struct inode *inode = file->f_dentry->d_inode;
ldlm_policy_data_t policy = {.l_inodebits={MDS_INODELOCK_OPEN}};
- cfs_mutex_lock(&lli->lli_och_mutex);
+ mutex_lock(&lli->lli_och_mutex);
if (fd->fd_omode & FMODE_WRITE) {
lockmode = LCK_CW;
LASSERT(lli->lli_open_fd_write_count);
LASSERT(lli->lli_open_fd_read_count);
lli->lli_open_fd_read_count--;
}
- cfs_mutex_unlock(&lli->lli_och_mutex);
+ mutex_unlock(&lli->lli_och_mutex);
if (!md_lock_match(md_exp, flags, ll_inode2fid(inode),
LDLM_IBITS, &policy, lockmode,
fd->fd_file = file;
if (S_ISDIR(inode->i_mode)) {
- cfs_spin_lock(&lli->lli_sa_lock);
- if (lli->lli_opendir_key == NULL && lli->lli_sai == NULL &&
- lli->lli_opendir_pid == 0) {
- lli->lli_opendir_key = fd;
- lli->lli_opendir_pid = cfs_curproc_pid();
- opendir_set = 1;
- }
- cfs_spin_unlock(&lli->lli_sa_lock);
+ spin_lock(&lli->lli_sa_lock);
+ if (lli->lli_opendir_key == NULL && lli->lli_sai == NULL &&
+ lli->lli_opendir_pid == 0) {
+ lli->lli_opendir_key = fd;
+ lli->lli_opendir_pid = cfs_curproc_pid();
+ opendir_set = 1;
+ }
+ spin_unlock(&lli->lli_sa_lock);
}
if (inode->i_sb->s_root == file->f_dentry) {
och_usecount = &lli->lli_open_fd_read_count;
}
- cfs_mutex_lock(&lli->lli_och_mutex);
+ mutex_lock(&lli->lli_och_mutex);
if (*och_p) { /* Open handle is present */
if (it_disposition(it, DISP_OPEN_OPEN)) {
/* Well, there's extra open request that we do not need,
let's close it somehow. This will decref request. */
rc = it_open_error(DISP_OPEN_OPEN, it);
if (rc) {
- cfs_mutex_unlock(&lli->lli_och_mutex);
+ mutex_unlock(&lli->lli_och_mutex);
GOTO(out_openerr, rc);
}
rc = ll_local_open(file, it, fd, NULL);
if (rc) {
(*och_usecount)--;
- cfs_mutex_unlock(&lli->lli_och_mutex);
+ mutex_unlock(&lli->lli_och_mutex);
GOTO(out_openerr, rc);
}
} else {
could be cancelled, and since blocking ast handler
would attempt to grab och_mutex as well, that would
result in a deadlock */
- cfs_mutex_unlock(&lli->lli_och_mutex);
+ mutex_unlock(&lli->lli_och_mutex);
it->it_create_mode |= M_CHECK_STALE;
rc = ll_intent_file_open(file, NULL, 0, it);
it->it_create_mode &= ~M_CHECK_STALE;
if (rc)
GOTO(out_och_free, rc);
}
- cfs_mutex_unlock(&lli->lli_och_mutex);
+ mutex_unlock(&lli->lli_och_mutex);
fd = NULL;
/* Must do this outside lli_och_mutex lock to prevent deadlock where
*och_p = NULL; /* OBD_FREE writes some magic there */
(*och_usecount)--;
}
- cfs_mutex_unlock(&lli->lli_och_mutex);
+ mutex_unlock(&lli->lli_och_mutex);
out_openerr:
if (opendir_set != 0)
#endif
if ((iot == CIT_WRITE) &&
!(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- if (cfs_mutex_lock_interruptible(&lli->
+ if (mutex_lock_interruptible(&lli->
lli_write_mutex))
GOTO(out, result = -ERESTARTSYS);
write_mutex_locked = 1;
} else if (iot == CIT_READ) {
- cfs_down_read(&lli->lli_trunc_sem);
+ down_read(&lli->lli_trunc_sem);
}
break;
case IO_SENDFILE:
}
result = cl_io_loop(env, io);
if (write_mutex_locked)
- cfs_mutex_unlock(&lli->lli_write_mutex);
+ mutex_unlock(&lli->lli_write_mutex);
else if (args->via_io_subtype == IO_NORMAL && iot == CIT_READ)
- cfs_up_read(&lli->lli_trunc_sem);
+ up_read(&lli->lli_trunc_sem);
} else {
/* cl_io_rw_init() handled IO */
result = io->ci_result;
if (ll_file_nolock(file))
RETURN(-EOPNOTSUPP);
- cfs_spin_lock(&lli->lli_lock);
- if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
- CWARN("group lock already existed with gid %lu\n",
- fd->fd_grouplock.cg_gid);
- cfs_spin_unlock(&lli->lli_lock);
- RETURN(-EINVAL);
- }
- LASSERT(fd->fd_grouplock.cg_lock == NULL);
- cfs_spin_unlock(&lli->lli_lock);
+ spin_lock(&lli->lli_lock);
+ if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
+ CWARN("group lock already existed with gid %lu\n",
+ fd->fd_grouplock.cg_gid);
+ spin_unlock(&lli->lli_lock);
+ RETURN(-EINVAL);
+ }
+ LASSERT(fd->fd_grouplock.cg_lock == NULL);
+ spin_unlock(&lli->lli_lock);
- rc = cl_get_grouplock(cl_i2info(inode)->lli_clob,
- arg, (file->f_flags & O_NONBLOCK), &grouplock);
- if (rc)
- RETURN(rc);
+ rc = cl_get_grouplock(cl_i2info(inode)->lli_clob,
+ arg, (file->f_flags & O_NONBLOCK), &grouplock);
+ if (rc)
+ RETURN(rc);
- cfs_spin_lock(&lli->lli_lock);
- if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
- cfs_spin_unlock(&lli->lli_lock);
- CERROR("another thread just won the race\n");
- cl_put_grouplock(&grouplock);
- RETURN(-EINVAL);
- }
+ spin_lock(&lli->lli_lock);
+ if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
+ spin_unlock(&lli->lli_lock);
+ CERROR("another thread just won the race\n");
+ cl_put_grouplock(&grouplock);
+ RETURN(-EINVAL);
+ }
- fd->fd_flags |= LL_FILE_GROUP_LOCKED;
- fd->fd_grouplock = grouplock;
- cfs_spin_unlock(&lli->lli_lock);
+ fd->fd_flags |= LL_FILE_GROUP_LOCKED;
+ fd->fd_grouplock = grouplock;
+ spin_unlock(&lli->lli_lock);
- CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
- RETURN(0);
+ CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
+ RETURN(0);
}
int ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ccc_grouplock grouplock;
- ENTRY;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ struct ccc_grouplock grouplock;
+ ENTRY;
- cfs_spin_lock(&lli->lli_lock);
- if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- cfs_spin_unlock(&lli->lli_lock);
+ spin_lock(&lli->lli_lock);
+ if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ spin_unlock(&lli->lli_lock);
CWARN("no group lock held\n");
RETURN(-EINVAL);
}
if (fd->fd_grouplock.cg_gid != arg) {
CWARN("group lock %lu doesn't match current id %lu\n",
arg, fd->fd_grouplock.cg_gid);
- cfs_spin_unlock(&lli->lli_lock);
- RETURN(-EINVAL);
- }
+ spin_unlock(&lli->lli_lock);
+ RETURN(-EINVAL);
+ }
- grouplock = fd->fd_grouplock;
- memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock));
- fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
- cfs_spin_unlock(&lli->lli_lock);
+ grouplock = fd->fd_grouplock;
+ memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock));
+ fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
+ spin_unlock(&lli->lli_lock);
- cl_put_grouplock(&grouplock);
- CDEBUG(D_INFO, "group lock %lu released\n", arg);
- RETURN(0);
+ cl_put_grouplock(&grouplock);
+ CDEBUG(D_INFO, "group lock %lu released\n", arg);
+ RETURN(0);
}
/**
struct posix_acl *acl = NULL;
ENTRY;
- cfs_spin_lock(&lli->lli_lock);
+ spin_lock(&lli->lli_lock);
/* VFS' acl_permission_check->check_acl will release the refcount */
acl = posix_acl_dup(lli->lli_posix_acl);
- cfs_spin_unlock(&lli->lli_lock);
+ spin_unlock(&lli->lli_lock);
RETURN(acl);
}
/* dynamic ioctl number support routins */
static struct llioc_ctl_data {
- cfs_rw_semaphore_t ioc_sem;
+ struct rw_semaphore ioc_sem;
cfs_list_t ioc_head;
} llioc = {
__RWSEM_INITIALIZER(llioc.ioc_sem),
in_data->iocd_count = count;
memcpy(in_data->iocd_cmd, cmd, sizeof(unsigned int) * count);
- cfs_down_write(&llioc.ioc_sem);
+ down_write(&llioc.ioc_sem);
cfs_list_add_tail(&in_data->iocd_list, &llioc.ioc_head);
- cfs_up_write(&llioc.ioc_sem);
+ up_write(&llioc.ioc_sem);
RETURN(in_data);
}
if (magic == NULL)
return;
- cfs_down_write(&llioc.ioc_sem);
+ down_write(&llioc.ioc_sem);
cfs_list_for_each_entry(tmp, &llioc.ioc_head, iocd_list) {
if (tmp == magic) {
unsigned int size = tmp->iocd_size;
cfs_list_del(&tmp->iocd_list);
- cfs_up_write(&llioc.ioc_sem);
+ up_write(&llioc.ioc_sem);
OBD_FREE(tmp, size);
return;
}
}
- cfs_up_write(&llioc.ioc_sem);
+ up_write(&llioc.ioc_sem);
CWARN("didn't find iocontrol register block with magic: %p\n", magic);
}
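
The llioc registry code around here guards its handler list with an rw_semaphore: down_write() while adding or removing entries, down_read() while walking the list to dispatch an ioctl. A minimal sketch of the same idiom with a hypothetical foo handler registry:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/rwsem.h>

struct foo_handler {
	struct list_head	fh_list;
	int			(*fh_fn)(unsigned int cmd);
};

static DECLARE_RWSEM(foo_reg_sem);
static LIST_HEAD(foo_reg_list);

static void foo_register(struct foo_handler *fh)
{
	down_write(&foo_reg_sem);		/* was cfs_down_write() */
	list_add_tail(&fh->fh_list, &foo_reg_list);
	up_write(&foo_reg_sem);			/* was cfs_up_write() */
}

static int foo_dispatch(unsigned int cmd)
{
	struct foo_handler *fh;
	int rc = -EINVAL;

	down_read(&foo_reg_sem);		/* was cfs_down_read() */
	list_for_each_entry(fh, &foo_reg_list, fh_list) {
		rc = fh->fh_fn(cmd);
		if (rc != -EINVAL)
			break;
	}
	up_read(&foo_reg_sem);			/* was cfs_up_read() */
	return rc;
}
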
struct llioc_data *data;
int rc = -EINVAL, i;
- cfs_down_read(&llioc.ioc_sem);
+ down_read(&llioc.ioc_sem);
cfs_list_for_each_entry(data, &llioc.ioc_head, iocd_list) {
for (i = 0; i < data->iocd_count; i++) {
if (cmd != data->iocd_cmd[i])
if (ret == LLIOC_STOP)
break;
}
- cfs_up_read(&llioc.ioc_sem);
+ up_read(&llioc.ioc_sem);
if (rcp)
*rcp = rc;
RETURN(PTR_ERR(op_data));
/* take layout lock mutex to enqueue layout lock exclusively. */
- cfs_mutex_lock(&lli->lli_layout_mutex);
+ mutex_lock(&lli->lli_layout_mutex);
/* try again inside layout mutex */
mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh,
*gen = lli->lli_layout_gen + 1;
ldlm_lock_decref(&lockh, mode);
- cfs_mutex_unlock(&lli->lli_layout_mutex);
+ mutex_unlock(&lli->lli_layout_mutex);
ll_finish_md_op_data(op_data);
RETURN(0);
}
}
ll_intent_drop_lock(&it);
- cfs_mutex_unlock(&lli->lli_layout_mutex);
+ mutex_unlock(&lli->lli_layout_mutex);
ll_finish_md_op_data(op_data);
RETURN(rc);
static inline int have_expired_capa(void)
{
- struct obd_capa *ocapa = NULL;
- int expired = 0;
+ struct obd_capa *ocapa = NULL;
+ int expired = 0;
- /* if ll_capa_list has client capa to expire or ll_idle_capas has
- * expired capa, return 1.
- */
- cfs_spin_lock(&capa_lock);
+ /* if ll_capa_list has client capa to expire or ll_idle_capas has
+ * expired capa, return 1.
+ */
+ spin_lock(&capa_lock);
if (!cfs_list_empty(ll_capa_list)) {
ocapa = cfs_list_entry(ll_capa_list->next, struct obd_capa,
c_list);
if (!expired)
update_capa_timer(ocapa, ocapa->c_expiry);
}
- cfs_spin_unlock(&capa_lock);
+ spin_unlock(&capa_lock);
- if (expired)
- DEBUG_CAPA(D_SEC, &ocapa->c_capa, "expired");
- return expired;
+ if (expired)
+ DEBUG_CAPA(D_SEC, &ocapa->c_capa, "expired");
+ return expired;
}
static void sort_add_capa(struct obd_capa *ocapa, cfs_list_t *head)
next = NULL;
- cfs_spin_lock(&capa_lock);
+ spin_lock(&capa_lock);
cfs_list_for_each_entry_safe(ocapa, tmp, ll_capa_list, c_list) {
__u64 ibits;
capa_get(ocapa);
ll_capa_renewed++;
- cfs_spin_unlock(&capa_lock);
- rc = md_renew_capa(ll_i2mdexp(inode), ocapa,
- ll_update_capa);
- cfs_spin_lock(&capa_lock);
+ spin_unlock(&capa_lock);
+ rc = md_renew_capa(ll_i2mdexp(inode), ocapa,
+ ll_update_capa);
+ spin_lock(&capa_lock);
if (rc) {
DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
"renew failed: %d", rc);
ll_delete_capa(ocapa);
}
- cfs_spin_unlock(&capa_lock);
- }
+ spin_unlock(&capa_lock);
+ }
- thread_set_flags(&ll_capa_thread, SVC_STOPPED);
- cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
- RETURN(0);
+ thread_set_flags(&ll_capa_thread, SVC_STOPPED);
+ cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
+ RETURN(0);
}
void ll_capa_timer_callback(unsigned long unused)
LASSERT(opc == CAPA_OPC_OSS_WRITE || opc == CAPA_OPC_OSS_RW ||
opc == CAPA_OPC_OSS_TRUNC);
- cfs_spin_lock(&capa_lock);
+ spin_lock(&capa_lock);
cfs_list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
if (capa_is_expired(ocapa))
continue;
cfs_atomic_set(&ll_capa_debug, 0);
}
}
- cfs_spin_unlock(&capa_lock);
+ spin_unlock(&capa_lock);
- RETURN(ocapa);
+ RETURN(ocapa);
}
EXPORT_SYMBOL(ll_osscapa_get);
if ((ll_i2sbi(inode)->ll_flags & LL_SBI_MDS_CAPA) == 0)
RETURN(NULL);
- cfs_spin_lock(&capa_lock);
- ocapa = capa_get(lli->lli_mds_capa);
- cfs_spin_unlock(&capa_lock);
+ spin_lock(&capa_lock);
+ ocapa = capa_get(lli->lli_mds_capa);
+ spin_unlock(&capa_lock);
if (!ocapa && cfs_atomic_read(&ll_capa_debug)) {
CERROR("no mds capability for "DFID"\n", PFID(&lli->lli_fid));
cfs_atomic_set(&ll_capa_debug, 0);
DEBUG_CAPA(D_SEC, capa, "add MDS");
} else {
- cfs_spin_lock(&old->c_lock);
- old->c_capa = *capa;
- cfs_spin_unlock(&old->c_lock);
+ spin_lock(&old->c_lock);
+ old->c_capa = *capa;
+ spin_unlock(&old->c_lock);
DEBUG_CAPA(D_SEC, capa, "update MDS");
DEBUG_CAPA(D_SEC, capa, "add OSS");
} else {
- cfs_spin_lock(&old->c_lock);
- old->c_capa = *capa;
- cfs_spin_unlock(&old->c_lock);
+ spin_lock(&old->c_lock);
+ old->c_capa = *capa;
+ spin_unlock(&old->c_lock);
DEBUG_CAPA(D_SEC, capa, "update OSS");
struct obd_capa *ll_add_capa(struct inode *inode, struct obd_capa *ocapa)
{
- cfs_spin_lock(&capa_lock);
+ spin_lock(&capa_lock);
ocapa = capa_for_mds(&ocapa->c_capa) ? do_add_mds_capa(inode, ocapa) :
do_add_oss_capa(inode, ocapa);
update_capa_timer(ocapa, capa_renewal_time(ocapa));
}
- cfs_spin_unlock(&capa_lock);
+ spin_unlock(&capa_lock);
- cfs_atomic_set(&ll_capa_debug, 1);
- return ocapa;
+ cfs_atomic_set(&ll_capa_debug, 1);
+ return ocapa;
}
static inline void delay_capa_renew(struct obd_capa *oc, cfs_time_t delay)
if (IS_ERR(capa)) {
/* set error code */
rc = PTR_ERR(capa);
- cfs_spin_lock(&capa_lock);
+ spin_lock(&capa_lock);
if (rc == -ENOENT) {
DEBUG_CAPA(D_SEC, &ocapa->c_capa,
"renewal canceled because object removed");
cfs_list_del_init(&ocapa->c_list);
sort_add_capa(ocapa, &ll_idle_capas);
- cfs_spin_unlock(&capa_lock);
-
- capa_put(ocapa);
- iput(inode);
- RETURN(rc);
- }
-
- cfs_spin_lock(&ocapa->c_lock);
- LASSERT(!memcmp(&ocapa->c_capa, capa,
- offsetof(struct lustre_capa, lc_opc)));
- ocapa->c_capa = *capa;
- set_capa_expiry(ocapa);
- cfs_spin_unlock(&ocapa->c_lock);
-
- cfs_spin_lock(&capa_lock);
- if (capa_for_oss(capa))
- inode_add_oss_capa(inode, ocapa);
- DEBUG_CAPA(D_SEC, capa, "renew");
- EXIT;
+ spin_unlock(&capa_lock);
+
+ capa_put(ocapa);
+ iput(inode);
+ RETURN(rc);
+ }
+
+ spin_lock(&ocapa->c_lock);
+ LASSERT(!memcmp(&ocapa->c_capa, capa,
+ offsetof(struct lustre_capa, lc_opc)));
+ ocapa->c_capa = *capa;
+ set_capa_expiry(ocapa);
+ spin_unlock(&ocapa->c_lock);
+
+ spin_lock(&capa_lock);
+ if (capa_for_oss(capa))
+ inode_add_oss_capa(inode, ocapa);
+ DEBUG_CAPA(D_SEC, capa, "renew");
+ EXIT;
retry:
- cfs_list_del_init(&ocapa->c_list);
- sort_add_capa(ocapa, ll_capa_list);
- update_capa_timer(ocapa, capa_renewal_time(ocapa));
- cfs_spin_unlock(&capa_lock);
-
- capa_put(ocapa);
- iput(inode);
- return rc;
+ cfs_list_del_init(&ocapa->c_list);
+ sort_add_capa(ocapa, ll_capa_list);
+ update_capa_timer(ocapa, capa_renewal_time(ocapa));
+ spin_unlock(&capa_lock);
+
+ capa_put(ocapa);
+ iput(inode);
+ return rc;
}
void ll_capa_open(struct inode *inode)
/* release ref when find */
capa_put(ocapa);
if (likely(ocapa->c_capa.lc_opc == CAPA_OPC_OSS_TRUNC)) {
- cfs_spin_lock(&capa_lock);
- ll_delete_capa(ocapa);
- cfs_spin_unlock(&capa_lock);
- }
+ spin_lock(&capa_lock);
+ ll_delete_capa(ocapa);
+ spin_unlock(&capa_lock);
+ }
}
void ll_clear_inode_capas(struct inode *inode)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct obd_capa *ocapa, *tmp;
-
- cfs_spin_lock(&capa_lock);
- ocapa = lli->lli_mds_capa;
- if (ocapa)
- ll_delete_capa(ocapa);
-
- cfs_list_for_each_entry_safe(ocapa, tmp, &lli->lli_oss_capas,
- u.cli.lli_list)
- ll_delete_capa(ocapa);
- cfs_spin_unlock(&capa_lock);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_capa *ocapa, *tmp;
+
+ spin_lock(&capa_lock);
+ ocapa = lli->lli_mds_capa;
+ if (ocapa)
+ ll_delete_capa(ocapa);
+
+ cfs_list_for_each_entry_safe(ocapa, tmp, &lli->lli_oss_capas,
+ u.cli.lli_list)
+ ll_delete_capa(ocapa);
+ spin_unlock(&capa_lock);
}
void ll_print_capa_stat(struct ll_sb_info *sbi)
/** records that a write is in flight */
void vvp_write_pending(struct ccc_object *club, struct ccc_page *page)
{
- struct ll_inode_info *lli = ll_i2info(club->cob_inode);
-
- ENTRY;
- cfs_spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_SOM_DIRTY;
- if (page != NULL && cfs_list_empty(&page->cpg_pending_linkage))
- cfs_list_add(&page->cpg_pending_linkage,
- &club->cob_pending_list);
- cfs_spin_unlock(&lli->lli_lock);
- EXIT;
+ struct ll_inode_info *lli = ll_i2info(club->cob_inode);
+
+ ENTRY;
+ spin_lock(&lli->lli_lock);
+ lli->lli_flags |= LLIF_SOM_DIRTY;
+ if (page != NULL && cfs_list_empty(&page->cpg_pending_linkage))
+ cfs_list_add(&page->cpg_pending_linkage,
+ &club->cob_pending_list);
+ spin_unlock(&lli->lli_lock);
+ EXIT;
}
/** records that a write has completed */
void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
{
- struct ll_inode_info *lli = ll_i2info(club->cob_inode);
- int rc = 0;
-
- ENTRY;
- cfs_spin_lock(&lli->lli_lock);
- if (page != NULL && !cfs_list_empty(&page->cpg_pending_linkage)) {
- cfs_list_del_init(&page->cpg_pending_linkage);
- rc = 1;
- }
- cfs_spin_unlock(&lli->lli_lock);
- if (rc)
- ll_queue_done_writing(club->cob_inode, 0);
- EXIT;
+ struct ll_inode_info *lli = ll_i2info(club->cob_inode);
+ int rc = 0;
+
+ ENTRY;
+ spin_lock(&lli->lli_lock);
+ if (page != NULL && !cfs_list_empty(&page->cpg_pending_linkage)) {
+ cfs_list_del_init(&page->cpg_pending_linkage);
+ rc = 1;
+ }
+ spin_unlock(&lli->lli_lock);
+ if (rc)
+ ll_queue_done_writing(club->cob_inode, 0);
+ EXIT;
}
/** Queues DONE_WRITING if
* - inode has no dirty pages; */
void ll_queue_done_writing(struct inode *inode, unsigned long flags)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
- ENTRY;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
+ ENTRY;
- cfs_spin_lock(&lli->lli_lock);
+ spin_lock(&lli->lli_lock);
lli->lli_flags |= flags;
if ((lli->lli_flags & LLIF_DONE_WRITING) &&
inode->i_ino, inode->i_generation,
lli->lli_flags);
/* DONE_WRITING is allowed and inode has no dirty page. */
- cfs_spin_lock(&lcq->lcq_lock);
+ spin_lock(&lcq->lcq_lock);
LASSERT(cfs_list_empty(&lli->lli_close_list));
CDEBUG(D_INODE, "adding inode %lu/%u to close list\n",
lli->lli_flags &= ~LLIF_DONE_WRITING;
cfs_waitq_signal(&lcq->lcq_waitq);
- cfs_spin_unlock(&lcq->lcq_lock);
- }
- cfs_spin_unlock(&lli->lli_lock);
- EXIT;
+ spin_unlock(&lcq->lcq_lock);
+ }
+ spin_unlock(&lli->lli_lock);
+ EXIT;
}
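ll_queue_done_writing above nests lcq_lock inside lli_lock; a hedged sketch of that ordering rule with hypothetical locks and lists, where the only point is that the two spinlocks are always taken in the same order and so cannot deadlock against each other:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_queue {
	spinlock_t		dq_lock;
	struct list_head	dq_head;
};

struct demo_item {
	spinlock_t		di_lock;
	struct list_head	di_link;
};

static void demo_queue_item(struct demo_item *di, struct demo_queue *dq)
{
	spin_lock(&di->di_lock);		/* per-object lock first */
	spin_lock(&dq->dq_lock);		/* then the queue lock */
	if (list_empty(&di->di_link))
		list_add_tail(&di->di_link, &dq->dq_head);
	spin_unlock(&dq->dq_lock);
	spin_unlock(&di->di_lock);
}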
/** Pack SOM attributes info @opdata for CLOSE, DONE_WRITING rpc. */
/** Closes ioepoch and packs Size-on-MDS attribute if needed into @op_data. */
void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
- struct obd_client_handle **och, unsigned long flags)
+ struct obd_client_handle **och, unsigned long flags)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
- ENTRY;
-
- cfs_spin_lock(&lli->lli_lock);
- if (!(cfs_list_empty(&club->cob_pending_list))) {
- if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
- LASSERT(*och != NULL);
- LASSERT(lli->lli_pending_och == NULL);
- /* Inode is dirty and there is no pending write done
- * request yet, DONE_WRITE is to be sent later. */
- lli->lli_flags |= LLIF_EPOCH_PENDING;
- lli->lli_pending_och = *och;
- cfs_spin_unlock(&lli->lli_lock);
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
+ ENTRY;
+
+ spin_lock(&lli->lli_lock);
+ if (!(cfs_list_empty(&club->cob_pending_list))) {
+ if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
+ LASSERT(*och != NULL);
+ LASSERT(lli->lli_pending_och == NULL);
+ /* Inode is dirty and there is no pending write done
+ * request yet, DONE_WRITE is to be sent later. */
+ lli->lli_flags |= LLIF_EPOCH_PENDING;
+ lli->lli_pending_och = *och;
+ spin_unlock(&lli->lli_lock);
inode = igrab(inode);
LASSERT(inode);
* and try DONE_WRITE again later. */
LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
lli->lli_flags |= LLIF_DONE_WRITING;
- cfs_spin_unlock(&lli->lli_lock);
+ spin_unlock(&lli->lli_lock);
inode = igrab(inode);
LASSERT(inode);
} else {
/* Pack Size-on-MDS inode attributes only if they has changed */
if (!(lli->lli_flags & LLIF_SOM_DIRTY)) {
- cfs_spin_unlock(&lli->lli_lock);
- GOTO(out, 0);
- }
-
- /* There is a pending DONE_WRITE -- close epoch with no
- * attribute change. */
- if (lli->lli_flags & LLIF_EPOCH_PENDING) {
- cfs_spin_unlock(&lli->lli_lock);
- GOTO(out, 0);
- }
- }
-
- LASSERT(cfs_list_empty(&club->cob_pending_list));
- lli->lli_flags &= ~LLIF_SOM_DIRTY;
- cfs_spin_unlock(&lli->lli_lock);
- ll_done_writing_attr(inode, op_data);
-
- EXIT;
+ spin_unlock(&lli->lli_lock);
+ GOTO(out, 0);
+ }
+
+ /* There is a pending DONE_WRITE -- close epoch with no
+ * attribute change. */
+ if (lli->lli_flags & LLIF_EPOCH_PENDING) {
+ spin_unlock(&lli->lli_lock);
+ GOTO(out, 0);
+ }
+ }
+
+ LASSERT(cfs_list_empty(&club->cob_pending_list));
+ lli->lli_flags &= ~LLIF_SOM_DIRTY;
+ spin_unlock(&lli->lli_lock);
+ ll_done_writing_attr(inode, op_data);
+
+ EXIT;
out:
- return;
+ return;
}
/**
static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
{
- struct ll_inode_info *lli = NULL;
+ struct ll_inode_info *lli = NULL;
- cfs_spin_lock(&lcq->lcq_lock);
+ spin_lock(&lcq->lcq_lock);
if (!cfs_list_empty(&lcq->lcq_head)) {
lli = cfs_list_entry(lcq->lcq_head.next, struct ll_inode_info,
} else if (cfs_atomic_read(&lcq->lcq_stop))
lli = ERR_PTR(-EALREADY);
- cfs_spin_unlock(&lcq->lcq_lock);
- return lli;
+ spin_unlock(&lcq->lcq_lock);
+ return lli;
}
static int ll_close_thread(void *arg)
cfs_daemonize(name);
}
- cfs_complete(&lcq->lcq_comp);
+ complete(&lcq->lcq_comp);
while (1) {
struct l_wait_info lwi = { 0 };
}
CDEBUG(D_INFO, "ll_close exiting\n");
- cfs_complete(&lcq->lcq_comp);
- RETURN(0);
+ complete(&lcq->lcq_comp);
+ RETURN(0);
}
int ll_close_thread_start(struct ll_close_queue **lcq_ret)
if (lcq == NULL)
return -ENOMEM;
- cfs_spin_lock_init(&lcq->lcq_lock);
- CFS_INIT_LIST_HEAD(&lcq->lcq_head);
- cfs_waitq_init(&lcq->lcq_waitq);
- cfs_init_completion(&lcq->lcq_comp);
+ spin_lock_init(&lcq->lcq_lock);
+ CFS_INIT_LIST_HEAD(&lcq->lcq_head);
+ cfs_waitq_init(&lcq->lcq_waitq);
+ init_completion(&lcq->lcq_comp);
- pid = cfs_create_thread(ll_close_thread, lcq, 0);
- if (pid < 0) {
- OBD_FREE(lcq, sizeof(*lcq));
- return pid;
- }
+ pid = cfs_create_thread(ll_close_thread, lcq, 0);
+ if (pid < 0) {
+ OBD_FREE(lcq, sizeof(*lcq));
+ return pid;
+ }
- cfs_wait_for_completion(&lcq->lcq_comp);
- *lcq_ret = lcq;
- return 0;
+ wait_for_completion(&lcq->lcq_comp);
+ *lcq_ret = lcq;
+ return 0;
}
void ll_close_thread_shutdown(struct ll_close_queue *lcq)
{
- cfs_init_completion(&lcq->lcq_comp);
- cfs_atomic_inc(&lcq->lcq_stop);
- cfs_waitq_signal(&lcq->lcq_waitq);
- cfs_wait_for_completion(&lcq->lcq_comp);
- OBD_FREE(lcq, sizeof(*lcq));
+ init_completion(&lcq->lcq_comp);
+ cfs_atomic_inc(&lcq->lcq_stop);
+ cfs_waitq_signal(&lcq->lcq_waitq);
+ wait_for_completion(&lcq->lcq_comp);
+ OBD_FREE(lcq, sizeof(*lcq));
}
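The lcq_comp conversion above follows the usual completion handshake. A minimal sketch under assumed names; kthread_run() stands in here for the cfs_create_thread() wrapper that the real code still uses:

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kthread.h>

static struct completion demo_started;

static int demo_thread(void *arg)
{
	complete(&demo_started);	/* tell the creator we are running */
	/* ... service loop would go here ... */
	return 0;
}

static int demo_start(void)
{
	struct task_struct *task;

	init_completion(&demo_started);		/* was cfs_init_completion() */
	task = kthread_run(demo_thread, NULL, "demo_thread");
	if (IS_ERR(task))
		return PTR_ERR(task);
	wait_for_completion(&demo_started);	/* block until the thread is up */
	return 0;
}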
};
struct ll_inode_info {
- __u32 lli_inode_magic;
- __u32 lli_flags;
- __u64 lli_ioepoch;
+ __u32 lli_inode_magic;
+ __u32 lli_flags;
+ __u64 lli_ioepoch;
- cfs_spinlock_t lli_lock;
- struct posix_acl *lli_posix_acl;
+ spinlock_t lli_lock;
+ struct posix_acl *lli_posix_acl;
- cfs_hlist_head_t *lli_remote_perms;
- cfs_mutex_t lli_rmtperm_mutex;
+ cfs_hlist_head_t *lli_remote_perms;
+ struct mutex lli_rmtperm_mutex;
/* identifying fields for both metadata and data stacks. */
struct lu_fid lli_fid;
__u64 lli_open_fd_write_count;
__u64 lli_open_fd_exec_count;
/* Protects access to och pointers and their usage counters */
- cfs_mutex_t lli_och_mutex;
+ struct mutex lli_och_mutex;
- struct inode lli_vfs_inode;
+ struct inode lli_vfs_inode;
- /* the most recent timestamps obtained from mds */
- struct ost_lvb lli_lvb;
- cfs_spinlock_t lli_agl_lock;
+ /* the most recent timestamps obtained from mds */
+ struct ost_lvb lli_lvb;
+ spinlock_t lli_agl_lock;
- /* Try to make the d::member and f::member are aligned. Before using
- * these members, make clear whether it is directory or not. */
- union {
- /* for directory */
- struct {
- /* serialize normal readdir and statahead-readdir. */
- cfs_mutex_t d_readdir_mutex;
+ /* Try to keep the d::member and f::member aligned. Before using
+ * these members, make clear whether it is a directory or not. */
+ union {
+ /* for directory */
+ struct {
+ /* serialize normal readdir and statahead-readdir. */
+ struct mutex d_readdir_mutex;
/* metadata statahead */
/* since parent-child threads can share the same @file
struct ll_statahead_info *d_sai;
struct posix_acl *d_def_acl;
/* protect statahead stuff. */
- cfs_spinlock_t d_sa_lock;
- /* "opendir_pid" is the token when lookup/revalid
- * -- I am the owner of dir statahead. */
- pid_t d_opendir_pid;
- } d;
+ spinlock_t d_sa_lock;
+ /* "opendir_pid" is the token when lookup/revalid
+ * -- I am the owner of dir statahead. */
+ pid_t d_opendir_pid;
+ } d;
#define lli_readdir_mutex u.d.d_readdir_mutex
#define lli_opendir_key u.d.d_opendir_key
#define lli_sa_lock u.d.d_sa_lock
#define lli_opendir_pid u.d.d_opendir_pid
- /* for non-directory */
- struct {
- cfs_semaphore_t f_size_sem;
- void *f_size_sem_owner;
- char *f_symlink_name;
- __u64 f_maxbytes;
- /*
- * cfs_rw_semaphore_t {
- * signed long count; // align u.d.d_def_acl
- * cfs_spinlock_t wait_lock; // align u.d.d_sa_lock
- * struct list_head wait_list;
- * }
- */
- cfs_rw_semaphore_t f_trunc_sem;
- cfs_mutex_t f_write_mutex;
+ /* for non-directory */
+ struct {
+ struct semaphore f_size_sem;
+ void *f_size_sem_owner;
+ char *f_symlink_name;
+ __u64 f_maxbytes;
+ /*
+ * struct rw_semaphore {
+ * signed long count; // align d.d_def_acl
+ * spinlock_t wait_lock; // align d.d_sa_lock
+ * struct list_head wait_list;
+ * }
+ */
+ struct rw_semaphore f_trunc_sem;
+ struct mutex f_write_mutex;
- cfs_rw_semaphore_t f_glimpse_sem;
+ struct rw_semaphore f_glimpse_sem;
cfs_time_t f_glimpse_time;
cfs_list_t f_agl_list;
__u64 f_agl_index;
- /* for writepage() only to communicate to fsync */
- int f_async_rc;
+ /* for writepage() only to communicate to fsync */
+ int f_async_rc;
/*
* whenever a process try to read/write the file, the
struct cl_object *lli_clob;
/* mutex to request for layout lock exclusively. */
- cfs_mutex_t lli_layout_mutex;
+ struct mutex lli_layout_mutex;
/* valid only inside LAYOUT ibits lock, protected by lli_layout_mutex */
__u32 lli_layout_gen;
};
};
struct rmtacl_ctl_table {
- cfs_spinlock_t rct_lock;
- cfs_list_t rct_entries[RCE_HASHES];
+ spinlock_t rct_lock;
+ cfs_list_t rct_entries[RCE_HASHES];
};
#define EE_HASHES 32
};
struct eacl_table {
- cfs_spinlock_t et_lock;
- cfs_list_t et_entries[EE_HASHES];
+ spinlock_t et_lock;
+ cfs_list_t et_entries[EE_HASHES];
};
struct ll_sb_info {
- cfs_list_t ll_list;
- /* this protects pglist and ra_info. It isn't safe to
- * grab from interrupt contexts */
- cfs_spinlock_t ll_lock;
- cfs_spinlock_t ll_pp_extent_lock; /* Lock for pp_extent entries */
- cfs_spinlock_t ll_process_lock; /* Lock for ll_rw_process_info */
+ cfs_list_t ll_list;
+ /* this protects pglist and ra_info. It isn't safe to
+ * grab from interrupt contexts */
+ spinlock_t ll_lock;
+ spinlock_t ll_pp_extent_lock; /* pp_extent entry*/
+ spinlock_t ll_process_lock; /* ll_rw_process_info */
struct obd_uuid ll_sb_uuid;
struct obd_export *ll_md_exp;
struct obd_export *ll_dt_exp;
* per file-descriptor read-ahead data.
*/
struct ll_readahead_state {
- cfs_spinlock_t ras_lock;
+ spinlock_t ras_lock;
/*
* index of the last page that read(2) needed and that wasn't in the
* cache. Used by ras_update() to detect seeks.
struct lov_stripe_md;
-extern cfs_spinlock_t inode_lock;
+extern spinlock_t inode_lock;
extern struct proc_dir_entry *proc_lustre_fs_root;
/* llite/llite_close.c */
struct ll_close_queue {
- cfs_spinlock_t lcq_lock;
- cfs_list_t lcq_head;
- cfs_waitq_t lcq_waitq;
- cfs_completion_t lcq_comp;
- cfs_atomic_t lcq_stop;
+ spinlock_t lcq_lock;
+ cfs_list_t lcq_head;
+ cfs_waitq_t lcq_waitq;
+ struct completion lcq_comp;
+ cfs_atomic_t lcq_stop;
};
struct ccc_object *cl_inode2ccc(struct inode *inode);
cfs_list_t sai_entries_stated; /* entries stated */
cfs_list_t sai_entries_agl; /* AGL entries to be sent */
cfs_list_t sai_cache[LL_SA_CACHE_SIZE];
- cfs_spinlock_t sai_cache_lock[LL_SA_CACHE_SIZE];
- cfs_atomic_t sai_cache_count; /* entry count in cache */
+ spinlock_t sai_cache_lock[LL_SA_CACHE_SIZE];
+ cfs_atomic_t sai_cache_count; /* entry count in cache */
};
int do_statahead_enter(struct inode *dir, struct dentry **dentry,
static inline int ll_glimpse_size(struct inode *inode)
{
- struct ll_inode_info *lli = ll_i2info(inode);
- int rc;
-
- cfs_down_read(&lli->lli_glimpse_sem);
- rc = cl_glimpse_size(inode);
- lli->lli_glimpse_time = cfs_time_current();
- cfs_up_read(&lli->lli_glimpse_sem);
- return rc;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ int rc;
+
+ down_read(&lli->lli_glimpse_sem);
+ rc = cl_glimpse_size(inode);
+ lli->lli_glimpse_time = cfs_time_current();
+ up_read(&lli->lli_glimpse_sem);
+ return rc;
}
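lli_glimpse_sem is taken shared here and with down_write_trylock() in the statahead path further down; a sketch of that split, with invented names:

#include <linux/errno.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(demo_glimpse_sem);		/* hypothetical */

/* Shared path: many tasks may sample attributes concurrently. */
static int demo_glimpse(void)
{
	int rc = 0;

	down_read(&demo_glimpse_sem);
	/* real code would refresh the size here */
	up_read(&demo_glimpse_sem);
	return rc;
}

/* Opportunistic path: skip the work entirely if anyone else holds the lock. */
static int demo_glimpse_async(void)
{
	if (!down_write_trylock(&demo_glimpse_sem))
		return -EBUSY;			/* someone is already glimpsing */
	/* ... exclusive work ... */
	up_write(&demo_glimpse_sem);
	return 0;
}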
static inline void
#endif
#ifndef log2
-#define log2(n) cfs_ffz(~(n))
+#define log2(n) ffz(~(n))
#endif
static struct ll_sb_info *ll_init_sbi(void)
if (!sbi)
RETURN(NULL);
- cfs_spin_lock_init(&sbi->ll_lock);
- cfs_mutex_init(&sbi->ll_lco.lco_lock);
- cfs_spin_lock_init(&sbi->ll_pp_extent_lock);
- cfs_spin_lock_init(&sbi->ll_process_lock);
+ spin_lock_init(&sbi->ll_lock);
+ mutex_init(&sbi->ll_lco.lco_lock);
+ spin_lock_init(&sbi->ll_pp_extent_lock);
+ spin_lock_init(&sbi->ll_process_lock);
sbi->ll_rw_stats_on = 0;
si_meminfo(&si);
cfs_atomic_set(&sbi->ll_cache.ccc_users, 0);
sbi->ll_cache.ccc_lru_max = lru_page_max;
cfs_atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
- cfs_spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
+ spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
CFS_INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
- cfs_spin_lock(&ll_sb_lock);
- cfs_list_add_tail(&sbi->ll_list, &ll_super_blocks);
- cfs_spin_unlock(&ll_sb_lock);
+ spin_lock(&ll_sb_lock);
+ cfs_list_add_tail(&sbi->ll_list, &ll_super_blocks);
+ spin_unlock(&ll_sb_lock);
sbi->ll_flags |= LL_SBI_VERBOSE;
#ifdef ENABLE_CHECKSUM
#endif
for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
- cfs_spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i]. \
- pp_r_hist.oh_lock);
- cfs_spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i]. \
- pp_w_hist.oh_lock);
+ spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
+ pp_r_hist.oh_lock);
+ spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
+ pp_w_hist.oh_lock);
}
/* metadata statahead is enabled by default */
void ll_free_sbi(struct super_block *sb)
{
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- ENTRY;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ ENTRY;
- if (sbi != NULL) {
- cfs_spin_lock(&ll_sb_lock);
- cfs_list_del(&sbi->ll_list);
- cfs_spin_unlock(&ll_sb_lock);
- OBD_FREE(sbi, sizeof(*sbi));
- }
- EXIT;
+ if (sbi != NULL) {
+ spin_lock(&ll_sb_lock);
+ cfs_list_del(&sbi->ll_list);
+ spin_unlock(&ll_sb_lock);
+ OBD_FREE(sbi, sizeof(*sbi));
+ }
+ EXIT;
}
static struct dentry_operations ll_d_root_ops = {
GOTO(out_dt, err);
}
- cfs_mutex_lock(&sbi->ll_lco.lco_lock);
+ mutex_lock(&sbi->ll_lco.lco_lock);
sbi->ll_lco.lco_flags = data->ocd_connect_flags;
sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
- cfs_mutex_unlock(&sbi->ll_lco.lco_lock);
+ mutex_unlock(&sbi->ll_lco.lco_lock);
fid_zero(&sbi->ll_root_fid);
err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid, &oc);
void ll_lli_init(struct ll_inode_info *lli)
{
- lli->lli_inode_magic = LLI_INODE_MAGIC;
- lli->lli_flags = 0;
- lli->lli_ioepoch = 0;
- lli->lli_maxbytes = MAX_LFS_FILESIZE;
- cfs_spin_lock_init(&lli->lli_lock);
- lli->lli_posix_acl = NULL;
- lli->lli_remote_perms = NULL;
- cfs_mutex_init(&lli->lli_rmtperm_mutex);
+ lli->lli_inode_magic = LLI_INODE_MAGIC;
+ lli->lli_flags = 0;
+ lli->lli_ioepoch = 0;
+ lli->lli_maxbytes = MAX_LFS_FILESIZE;
+ spin_lock_init(&lli->lli_lock);
+ lli->lli_posix_acl = NULL;
+ lli->lli_remote_perms = NULL;
+ mutex_init(&lli->lli_rmtperm_mutex);
/* Do not set lli_fid, it has been initialized already. */
fid_zero(&lli->lli_pfid);
CFS_INIT_LIST_HEAD(&lli->lli_close_list);
lli->lli_open_fd_read_count = 0;
lli->lli_open_fd_write_count = 0;
lli->lli_open_fd_exec_count = 0;
- cfs_mutex_init(&lli->lli_och_mutex);
- cfs_spin_lock_init(&lli->lli_agl_lock);
+ mutex_init(&lli->lli_och_mutex);
+ spin_lock_init(&lli->lli_agl_lock);
lli->lli_has_smd = false;
- lli->lli_clob = NULL;
-
- LASSERT(lli->lli_vfs_inode.i_mode != 0);
- if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
- cfs_mutex_init(&lli->lli_readdir_mutex);
- lli->lli_opendir_key = NULL;
- lli->lli_sai = NULL;
- lli->lli_def_acl = NULL;
- cfs_spin_lock_init(&lli->lli_sa_lock);
- lli->lli_opendir_pid = 0;
- } else {
- cfs_sema_init(&lli->lli_size_sem, 1);
- lli->lli_size_sem_owner = NULL;
- lli->lli_symlink_name = NULL;
- cfs_init_rwsem(&lli->lli_trunc_sem);
- cfs_mutex_init(&lli->lli_write_mutex);
- cfs_init_rwsem(&lli->lli_glimpse_sem);
+ lli->lli_clob = NULL;
+
+ LASSERT(lli->lli_vfs_inode.i_mode != 0);
+ if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
+ mutex_init(&lli->lli_readdir_mutex);
+ lli->lli_opendir_key = NULL;
+ lli->lli_sai = NULL;
+ lli->lli_def_acl = NULL;
+ spin_lock_init(&lli->lli_sa_lock);
+ lli->lli_opendir_pid = 0;
+ } else {
+ sema_init(&lli->lli_size_sem, 1);
+ lli->lli_size_sem_owner = NULL;
+ lli->lli_symlink_name = NULL;
+ init_rwsem(&lli->lli_trunc_sem);
+ mutex_init(&lli->lli_write_mutex);
+ init_rwsem(&lli->lli_glimpse_sem);
lli->lli_glimpse_time = 0;
CFS_INIT_LIST_HEAD(&lli->lli_agl_list);
lli->lli_agl_index = 0;
lli->lli_async_rc = 0;
}
- cfs_mutex_init(&lli->lli_layout_mutex);
+ mutex_init(&lli->lli_layout_mutex);
}
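ll_lli_init above exercises the whole family of initializers after the conversion; a condensed sketch of the same pattern with an invented structure:

#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>

struct demo_info {
	spinlock_t		di_lock;	/* was cfs_spinlock_t     */
	struct mutex		di_mutex;	/* was cfs_mutex_t        */
	struct rw_semaphore	di_rwsem;	/* was cfs_rw_semaphore_t */
	struct semaphore	di_sem;		/* was cfs_semaphore_t    */
};

static void demo_info_init(struct demo_info *di)
{
	spin_lock_init(&di->di_lock);		/* was cfs_spin_lock_init() */
	mutex_init(&di->di_mutex);		/* was cfs_mutex_init()     */
	init_rwsem(&di->di_rwsem);		/* was cfs_init_rwsem()     */
	sema_init(&di->di_sem, 1);		/* was cfs_sema_init()      */
}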
static inline int ll_bdi_register(struct backing_dev_info *bdi)
if (ia_valid & ATTR_SIZE)
inode_dio_write_done(inode);
mutex_unlock(&inode->i_mutex);
- cfs_down_write(&lli->lli_trunc_sem);
+ down_write(&lli->lli_trunc_sem);
mutex_lock(&inode->i_mutex);
if (ia_valid & ATTR_SIZE)
inode_dio_wait(inode);
ll_finish_md_op_data(op_data);
}
if (!S_ISDIR(inode->i_mode))
- cfs_up_write(&lli->lli_trunc_sem);
+ up_write(&lli->lli_trunc_sem);
ll_stats_ops_tally(ll_i2sbi(inode), (ia_valid & ATTR_SIZE) ?
LPROC_LL_TRUNC : LPROC_LL_SETATTR, 1);
lli = ll_i2info(inode);
LASSERT(lli->lli_size_sem_owner != current);
- cfs_down(&lli->lli_size_sem);
+ down(&lli->lli_size_sem);
LASSERT(lli->lli_size_sem_owner == NULL);
lli->lli_size_sem_owner = current;
}
lli = ll_i2info(inode);
LASSERT(lli->lli_size_sem_owner == current);
lli->lli_size_sem_owner = NULL;
- cfs_up(&lli->lli_size_sem);
+ up(&lli->lli_size_sem);
}
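lli_size_sem is a binary semaphore used as a sleeping lock with explicit owner tracking; roughly, with hypothetical names:

#include <linux/sched.h>
#include <linux/semaphore.h>

struct demo_size_lock {
	struct semaphore	dsl_sem;
	void			*dsl_owner;
};

static void demo_size_lock_init(struct demo_size_lock *dsl)
{
	sema_init(&dsl->dsl_sem, 1);	/* one token, initially free */
	dsl->dsl_owner = NULL;
}

static void demo_size_lock_take(struct demo_size_lock *dsl)
{
	down(&dsl->dsl_sem);		/* may sleep; was cfs_down() */
	dsl->dsl_owner = current;	/* record the owner for sanity checks */
}

static void demo_size_lock_drop(struct demo_size_lock *dsl)
{
	dsl->dsl_owner = NULL;
	up(&dsl->dsl_sem);		/* was cfs_up() */
}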
void ll_update_inode(struct inode *inode, struct lustre_md *md)
ll_update_remote_perm(inode, md->remote_perm);
}
#ifdef CONFIG_FS_POSIX_ACL
- else if (body->valid & OBD_MD_FLACL) {
- cfs_spin_lock(&lli->lli_lock);
- if (lli->lli_posix_acl)
- posix_acl_release(lli->lli_posix_acl);
- lli->lli_posix_acl = md->posix_acl;
- cfs_spin_unlock(&lli->lli_lock);
- }
+ else if (body->valid & OBD_MD_FLACL) {
+ spin_lock(&lli->lli_lock);
+ if (lli->lli_posix_acl)
+ posix_acl_release(lli->lli_posix_acl);
+ lli->lli_posix_acl = md->posix_acl;
+ spin_unlock(&lli->lli_lock);
+ }
#endif
inode->i_ino = cl_fid_build_ino(&body->fid1, 0);
inode->i_generation = cl_fid_build_gen(&body->fid1);
!ll_i2info(i2)->lli_has_smd) {
struct ll_inode_info *lli = ll_i2info(i2);
- cfs_spin_lock(&lli->lli_lock);
+ spin_lock(&lli->lli_lock);
if (likely(!lli->lli_has_smd && !fid_is_zero(&lli->lli_pfid)))
- op_data->op_fid1 = lli->lli_pfid;
- cfs_spin_unlock(&lli->lli_lock);
- /** We ignore parent's capability temporary. */
- }
+ op_data->op_fid1 = lli->lli_pfid;
+ spin_unlock(&lli->lli_lock);
+ /** We ignore the parent's capability temporarily. */
+ }
- return op_data;
+ return op_data;
}
void ll_finish_md_op_data(struct md_op_data *op_data)
* while truncate is on-going. */
inode = ccc_object_inode(io->ci_obj);
lli = ll_i2info(inode);
- cfs_down_read(&lli->lli_trunc_sem);
+ down_read(&lli->lli_trunc_sem);
result = cl_io_loop(env, io);
- cfs_up_read(&lli->lli_trunc_sem);
+ up_read(&lli->lli_trunc_sem);
cfs_restore_sigs(set);
* We have to find the parent to tell MDS how to init lov objects.
*/
if (S_ISREG(inode->i_mode) && !ll_i2info(inode)->lli_has_smd &&
- parent != NULL) {
- struct ll_inode_info *lli = ll_i2info(inode);
+ parent != NULL) {
+ struct ll_inode_info *lli = ll_i2info(inode);
- cfs_spin_lock(&lli->lli_lock);
- lli->lli_pfid = *parent;
- cfs_spin_unlock(&lli->lli_lock);
- }
+ spin_lock(&lli->lli_lock);
+ lli->lli_pfid = *parent;
+ spin_unlock(&lli->lli_lock);
+ }
result = d_obtain_alias(inode);
if (IS_ERR(result))
lgd.lgd_fid = ll_i2info(child->d_inode)->lli_fid;
lgd.lgd_found = 0;
- cfs_mutex_lock(&dir->i_mutex);
+ mutex_lock(&dir->i_mutex);
rc = ll_dir_read(dir, &offset, &lgd, ll_nfs_get_name_filldir);
- cfs_mutex_unlock(&dir->i_mutex);
+ mutex_unlock(&dir->i_mutex);
if (!rc && !lgd.lgd_found)
rc = -ENOENT;
EXIT;
struct rmtacl_ctl_entry *rct_search(struct rmtacl_ctl_table *rct, pid_t key)
{
- struct rmtacl_ctl_entry *rce;
+ struct rmtacl_ctl_entry *rce;
- cfs_spin_lock(&rct->rct_lock);
- rce = __rct_search(rct, key);
- cfs_spin_unlock(&rct->rct_lock);
- return rce;
+ spin_lock(&rct->rct_lock);
+ rce = __rct_search(rct, key);
+ spin_unlock(&rct->rct_lock);
+ return rce;
}
int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops)
if (rce == NULL)
return -ENOMEM;
- cfs_spin_lock(&rct->rct_lock);
- e = __rct_search(rct, key);
- if (unlikely(e != NULL)) {
- CWARN("Unexpected stale rmtacl_entry found: "
- "[key: %d] [ops: %d]\n", (int)key, ops);
- rce_free(e);
- }
- cfs_list_add_tail(&rce->rce_list, &rct->rct_entries[rce_hashfunc(key)]);
- cfs_spin_unlock(&rct->rct_lock);
-
- return 0;
+ spin_lock(&rct->rct_lock);
+ e = __rct_search(rct, key);
+ if (unlikely(e != NULL)) {
+ CWARN("Unexpected stale rmtacl_entry found: "
+ "[key: %d] [ops: %d]\n", (int)key, ops);
+ rce_free(e);
+ }
+ cfs_list_add_tail(&rce->rce_list, &rct->rct_entries[rce_hashfunc(key)]);
+ spin_unlock(&rct->rct_lock);
+
+ return 0;
}
int rct_del(struct rmtacl_ctl_table *rct, pid_t key)
{
- struct rmtacl_ctl_entry *rce;
+ struct rmtacl_ctl_entry *rce;
- cfs_spin_lock(&rct->rct_lock);
- rce = __rct_search(rct, key);
- if (rce)
- rce_free(rce);
- cfs_spin_unlock(&rct->rct_lock);
+ spin_lock(&rct->rct_lock);
+ rce = __rct_search(rct, key);
+ if (rce)
+ rce_free(rce);
+ spin_unlock(&rct->rct_lock);
- return rce ? 0 : -ENOENT;
+ return rce ? 0 : -ENOENT;
}
void rct_init(struct rmtacl_ctl_table *rct)
{
- int i;
+ int i;
- cfs_spin_lock_init(&rct->rct_lock);
- for (i = 0; i < RCE_HASHES; i++)
- CFS_INIT_LIST_HEAD(&rct->rct_entries[i]);
+ spin_lock_init(&rct->rct_lock);
+ for (i = 0; i < RCE_HASHES; i++)
+ CFS_INIT_LIST_HEAD(&rct->rct_entries[i]);
}
void rct_fini(struct rmtacl_ctl_table *rct)
{
- struct rmtacl_ctl_entry *rce;
- int i;
-
- cfs_spin_lock(&rct->rct_lock);
- for (i = 0; i < RCE_HASHES; i++)
- while (!cfs_list_empty(&rct->rct_entries[i])) {
- rce = cfs_list_entry(rct->rct_entries[i].next,
- struct rmtacl_ctl_entry, rce_list);
- rce_free(rce);
- }
- cfs_spin_unlock(&rct->rct_lock);
+ struct rmtacl_ctl_entry *rce;
+ int i;
+
+ spin_lock(&rct->rct_lock);
+ for (i = 0; i < RCE_HASHES; i++)
+ while (!cfs_list_empty(&rct->rct_entries[i])) {
+ rce = cfs_list_entry(rct->rct_entries[i].next,
+ struct rmtacl_ctl_entry, rce_list);
+ rce_free(rce);
+ }
+ spin_unlock(&rct->rct_lock);
}
struct eacl_entry *et_search_del(struct eacl_table *et, pid_t key,
struct lu_fid *fid, int type)
{
- struct eacl_entry *ee;
+ struct eacl_entry *ee;
- cfs_spin_lock(&et->et_lock);
- ee = __et_search_del(et, key, fid, type);
- cfs_spin_unlock(&et->et_lock);
- return ee;
+ spin_lock(&et->et_lock);
+ ee = __et_search_del(et, key, fid, type);
+ spin_unlock(&et->et_lock);
+ return ee;
}
void et_search_free(struct eacl_table *et, pid_t key)
{
- struct eacl_entry *ee, *next;
- cfs_list_t *head = &et->et_entries[ee_hashfunc(key)];
+ struct eacl_entry *ee, *next;
+ cfs_list_t *head = &et->et_entries[ee_hashfunc(key)];
- cfs_spin_lock(&et->et_lock);
- cfs_list_for_each_entry_safe(ee, next, head, ee_list)
- if (ee->ee_key == key)
- ee_free(ee);
+ spin_lock(&et->et_lock);
+ cfs_list_for_each_entry_safe(ee, next, head, ee_list)
+ if (ee->ee_key == key)
+ ee_free(ee);
- cfs_spin_unlock(&et->et_lock);
+ spin_unlock(&et->et_lock);
}
int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
if (ee == NULL)
return -ENOMEM;
- cfs_spin_lock(&et->et_lock);
- e = __et_search_del(et, key, fid, type);
- if (unlikely(e != NULL)) {
- CWARN("Unexpected stale eacl_entry found: "
- "[key: %d] [fid: "DFID"] [type: %d]\n",
- (int)key, PFID(fid), type);
- ee_free(e);
- }
- cfs_list_add_tail(&ee->ee_list, &et->et_entries[ee_hashfunc(key)]);
- cfs_spin_unlock(&et->et_lock);
-
- return 0;
+ spin_lock(&et->et_lock);
+ e = __et_search_del(et, key, fid, type);
+ if (unlikely(e != NULL)) {
+ CWARN("Unexpected stale eacl_entry found: "
+ "[key: %d] [fid: "DFID"] [type: %d]\n",
+ (int)key, PFID(fid), type);
+ ee_free(e);
+ }
+ cfs_list_add_tail(&ee->ee_list, &et->et_entries[ee_hashfunc(key)]);
+ spin_unlock(&et->et_lock);
+
+ return 0;
}
void et_init(struct eacl_table *et)
{
- int i;
+ int i;
- cfs_spin_lock_init(&et->et_lock);
- for (i = 0; i < EE_HASHES; i++)
- CFS_INIT_LIST_HEAD(&et->et_entries[i]);
+ spin_lock_init(&et->et_lock);
+ for (i = 0; i < EE_HASHES; i++)
+ CFS_INIT_LIST_HEAD(&et->et_entries[i]);
}
void et_fini(struct eacl_table *et)
{
- struct eacl_entry *ee;
- int i;
-
- cfs_spin_lock(&et->et_lock);
- for (i = 0; i < EE_HASHES; i++)
- while (!cfs_list_empty(&et->et_entries[i])) {
- ee = cfs_list_entry(et->et_entries[i].next,
- struct eacl_entry, ee_list);
- ee_free(ee);
- }
- cfs_spin_unlock(&et->et_lock);
+ struct eacl_entry *ee;
+ int i;
+
+ spin_lock(&et->et_lock);
+ for (i = 0; i < EE_HASHES; i++)
+ while (!cfs_list_empty(&et->et_entries[i])) {
+ ee = cfs_list_entry(et->et_entries[i].next,
+ struct eacl_entry, ee_list);
+ ee_free(ee);
+ }
+ spin_unlock(&et->et_lock);
}
#endif
int old_gfp_mask;
- cfs_spinlock_t lo_lock;
- struct bio *lo_bio;
- struct bio *lo_biotail;
- int lo_state;
- cfs_semaphore_t lo_sem;
- cfs_mutex_t lo_ctl_mutex;
+ spinlock_t lo_lock;
+ struct bio *lo_bio;
+ struct bio *lo_biotail;
+ int lo_state;
+ struct semaphore lo_sem;
+ struct mutex lo_ctl_mutex;
cfs_atomic_t lo_pending;
cfs_waitq_t lo_bh_wait;
static int max_loop = MAX_LOOP_DEFAULT;
static struct lloop_device *loop_dev;
static struct gendisk **disks;
-static cfs_mutex_t lloop_mutex;
+static struct mutex lloop_mutex;
static void *ll_iocontrol_magic = NULL;
static loff_t get_loop_size(struct lloop_device *lo, struct file *file)
*/
static void loop_add_bio(struct lloop_device *lo, struct bio *bio)
{
- unsigned long flags;
-
- cfs_spin_lock_irqsave(&lo->lo_lock, flags);
- if (lo->lo_biotail) {
- lo->lo_biotail->bi_next = bio;
- lo->lo_biotail = bio;
- } else
- lo->lo_bio = lo->lo_biotail = bio;
- cfs_spin_unlock_irqrestore(&lo->lo_lock, flags);
-
- cfs_atomic_inc(&lo->lo_pending);
- if (cfs_waitq_active(&lo->lo_bh_wait))
- cfs_waitq_signal(&lo->lo_bh_wait);
+ unsigned long flags;
+
+ spin_lock_irqsave(&lo->lo_lock, flags);
+ if (lo->lo_biotail) {
+ lo->lo_biotail->bi_next = bio;
+ lo->lo_biotail = bio;
+ } else
+ lo->lo_bio = lo->lo_biotail = bio;
+ spin_unlock_irqrestore(&lo->lo_lock, flags);
+
+ cfs_atomic_inc(&lo->lo_pending);
+ if (cfs_waitq_active(&lo->lo_bh_wait))
+ cfs_waitq_signal(&lo->lo_bh_wait);
}
/*
*/
static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
{
- struct bio *first;
- struct bio **bio;
- unsigned int count = 0;
- unsigned int page_count = 0;
- int rw;
-
- cfs_spin_lock_irq(&lo->lo_lock);
- first = lo->lo_bio;
- if (unlikely(first == NULL)) {
- cfs_spin_unlock_irq(&lo->lo_lock);
- return 0;
- }
+ struct bio *first;
+ struct bio **bio;
+ unsigned int count = 0;
+ unsigned int page_count = 0;
+ int rw;
+
+ spin_lock_irq(&lo->lo_lock);
+ first = lo->lo_bio;
+ if (unlikely(first == NULL)) {
+ spin_unlock_irq(&lo->lo_lock);
+ return 0;
+ }
/* TODO: need to split the bio, too bad. */
LASSERT(first->bi_vcnt <= LLOOP_MAX_SEGMENTS);
lo->lo_bio = NULL;
}
*req = first;
- cfs_spin_unlock_irq(&lo->lo_lock);
- return count;
+ spin_unlock_irq(&lo->lo_lock);
+ return count;
}
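loop_add_bio() and loop_get_bio() above show the two IRQ-aware spinlock flavours: _irqsave when the caller's interrupt state is unknown, plain _irq when interrupts are known to be enabled. A hedged sketch with an invented request queue:

#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_bio_lock);		/* hypothetical queue lock */
static LIST_HEAD(demo_bio_queue);

struct demo_req {
	struct list_head	dr_link;
};

/* Producer: may run with interrupts already disabled, so the current
 * IRQ state is saved and restored around the critical section. */
static void demo_add_req(struct demo_req *dr)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_bio_lock, flags);
	list_add_tail(&dr->dr_link, &demo_bio_queue);
	spin_unlock_irqrestore(&demo_bio_lock, flags);
}

/* Consumer: runs in process context with interrupts enabled, so the
 * cheaper _irq variant is sufficient. */
static struct demo_req *demo_get_req(void)
{
	struct demo_req *dr = NULL;

	spin_lock_irq(&demo_bio_lock);
	if (!list_empty(&demo_bio_queue)) {
		dr = list_first_entry(&demo_bio_queue, struct demo_req, dr_link);
		list_del(&dr->dr_link);
	}
	spin_unlock_irq(&demo_bio_lock);
	return dr;
}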
static ll_mrf_ret
CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
(unsigned long long)old_bio->bi_sector, old_bio->bi_size);
- cfs_spin_lock_irq(&lo->lo_lock);
- inactive = (lo->lo_state != LLOOP_BOUND);
- cfs_spin_unlock_irq(&lo->lo_lock);
+ spin_lock_irq(&lo->lo_lock);
+ inactive = (lo->lo_state != LLOOP_BOUND);
+ spin_unlock_irq(&lo->lo_lock);
if (inactive)
goto err;
/*
* up sem, we are running
*/
- cfs_up(&lo->lo_sem);
-
- for (;;) {
- cfs_wait_event(lo->lo_bh_wait, loop_active(lo));
- if (!cfs_atomic_read(&lo->lo_pending)) {
- int exiting = 0;
- cfs_spin_lock_irq(&lo->lo_lock);
- exiting = (lo->lo_state == LLOOP_RUNDOWN);
- cfs_spin_unlock_irq(&lo->lo_lock);
+ up(&lo->lo_sem);
+
+ for (;;) {
+ cfs_wait_event(lo->lo_bh_wait, loop_active(lo));
+ if (!cfs_atomic_read(&lo->lo_pending)) {
+ int exiting = 0;
+ spin_lock_irq(&lo->lo_lock);
+ exiting = (lo->lo_state == LLOOP_RUNDOWN);
+ spin_unlock_irq(&lo->lo_lock);
if (exiting)
break;
}
cl_env_put(env, &refcheck);
out:
- cfs_up(&lo->lo_sem);
+ up(&lo->lo_sem);
return ret;
}
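lo_sem is a semaphore initialised to zero and used as a rendezvous: the loop thread up()s it once it is running (and again when it exits), while the control paths down() to wait. A minimal hypothetical sketch:

#include <linux/semaphore.h>

static struct semaphore demo_ready;

static int demo_worker(void *arg)
{
	up(&demo_ready);		/* signal: the worker has started */
	/* ... main loop ... */
	return 0;
}

static void demo_wait_until_started(void)
{
	sema_init(&demo_ready, 0);	/* start "empty" so down() blocks */
	/* the worker thread would be created here, e.g. with kthread_run() */
	down(&demo_ready);		/* sleep until the worker up()s */
}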
set_blocksize(bdev, lo->lo_blocksize);
cfs_create_thread(loop_thread, lo, CLONE_KERNEL);
- cfs_down(&lo->lo_sem);
+ down(&lo->lo_sem);
return 0;
out:
if (filp == NULL)
return -EINVAL;
- cfs_spin_lock_irq(&lo->lo_lock);
- lo->lo_state = LLOOP_RUNDOWN;
- cfs_spin_unlock_irq(&lo->lo_lock);
- cfs_waitq_signal(&lo->lo_bh_wait);
+ spin_lock_irq(&lo->lo_lock);
+ lo->lo_state = LLOOP_RUNDOWN;
+ spin_unlock_irq(&lo->lo_lock);
+ cfs_waitq_signal(&lo->lo_bh_wait);
- cfs_down(&lo->lo_sem);
+ down(&lo->lo_sem);
lo->lo_backing_file = NULL;
lo->ioctl = NULL;
lo->lo_device = NULL;
struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
#endif
- cfs_mutex_lock(&lo->lo_ctl_mutex);
+ mutex_lock(&lo->lo_ctl_mutex);
lo->lo_refcnt++;
- cfs_mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&lo->lo_ctl_mutex);
return 0;
}
struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
#endif
- cfs_mutex_lock(&lo->lo_ctl_mutex);
+ mutex_lock(&lo->lo_ctl_mutex);
--lo->lo_refcnt;
- cfs_mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&lo->lo_ctl_mutex);
return 0;
}
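lo_ctl_mutex becomes a plain kernel mutex after the conversion; a small sketch of the open/release counting it protects, using a hypothetical device structure:

#include <linux/mutex.h>

struct demo_dev {
	struct mutex	dd_ctl_mutex;	/* was cfs_mutex_t */
	int		dd_refcnt;
};

static void demo_dev_init(struct demo_dev *dd)
{
	mutex_init(&dd->dd_ctl_mutex);	/* was cfs_mutex_init() */
	dd->dd_refcnt = 0;
}

static void demo_dev_open(struct demo_dev *dd)
{
	mutex_lock(&dd->dd_ctl_mutex);	/* may sleep, unlike a spinlock */
	dd->dd_refcnt++;
	mutex_unlock(&dd->dd_ctl_mutex);
}

static void demo_dev_release(struct demo_dev *dd)
{
	mutex_lock(&dd->dd_ctl_mutex);
	dd->dd_refcnt--;
	mutex_unlock(&dd->dd_ctl_mutex);
}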
int err = 0;
#endif
- cfs_mutex_lock(&lloop_mutex);
+ mutex_lock(&lloop_mutex);
switch (cmd) {
case LL_IOC_LLOOP_DETACH: {
err = loop_clr_fd(lo, bdev, 2);
err = -EINVAL;
break;
}
- cfs_mutex_unlock(&lloop_mutex);
+ mutex_unlock(&lloop_mutex);
return err;
}
CWARN("Enter llop_ioctl\n");
- cfs_mutex_lock(&lloop_mutex);
+ mutex_lock(&lloop_mutex);
switch (cmd) {
case LL_IOC_LLOOP_ATTACH: {
struct lloop_device *lo_free = NULL;
}
out:
- cfs_mutex_unlock(&lloop_mutex);
+ mutex_unlock(&lloop_mutex);
out1:
if (rcp)
*rcp = err;
goto out_mem3;
}
- cfs_mutex_init(&lloop_mutex);
+ mutex_init(&lloop_mutex);
for (i = 0; i < max_loop; i++) {
struct lloop_device *lo = &loop_dev[i];
if (!lo->lo_queue)
goto out_mem4;
- cfs_mutex_init(&lo->lo_ctl_mutex);
- cfs_sema_init(&lo->lo_sem, 0);
- cfs_waitq_init(&lo->lo_bh_wait);
- lo->lo_number = i;
- cfs_spin_lock_init(&lo->lo_lock);
+ mutex_init(&lo->lo_ctl_mutex);
+ sema_init(&lo->lo_sem, 0);
+ cfs_waitq_init(&lo->lo_bh_wait);
+ lo->lo_number = i;
+ spin_lock_init(&lo->lo_lock);
disk->major = lloop_major;
disk->first_minor = i;
disk->fops = &lo_fops;
static int ll_rd_max_readahead_mb(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
- struct super_block *sb = data;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- long pages_number;
- int mult;
+ struct super_block *sb = data;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ long pages_number;
+ int mult;
- cfs_spin_lock(&sbi->ll_lock);
- pages_number = sbi->ll_ra_info.ra_max_pages;
- cfs_spin_unlock(&sbi->ll_lock);
+ spin_lock(&sbi->ll_lock);
+ pages_number = sbi->ll_ra_info.ra_max_pages;
+ spin_unlock(&sbi->ll_lock);
- mult = 1 << (20 - PAGE_CACHE_SHIFT);
- return lprocfs_read_frac_helper(page, count, pages_number, mult);
+ mult = 1 << (20 - PAGE_CACHE_SHIFT);
+ return lprocfs_read_frac_helper(page, count, pages_number, mult);
}
static int ll_wr_max_readahead_mb(struct file *file, const char *buffer,
return -ERANGE;
}
- cfs_spin_lock(&sbi->ll_lock);
- sbi->ll_ra_info.ra_max_pages = pages_number;
- cfs_spin_unlock(&sbi->ll_lock);
+ spin_lock(&sbi->ll_lock);
+ sbi->ll_ra_info.ra_max_pages = pages_number;
+ spin_unlock(&sbi->ll_lock);
- return count;
+ return count;
}
static int ll_rd_max_readahead_per_file_mb(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
- struct super_block *sb = data;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- long pages_number;
- int mult;
+ struct super_block *sb = data;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ long pages_number;
+ int mult;
- cfs_spin_lock(&sbi->ll_lock);
- pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
- cfs_spin_unlock(&sbi->ll_lock);
+ spin_lock(&sbi->ll_lock);
+ pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
+ spin_unlock(&sbi->ll_lock);
- mult = 1 << (20 - CFS_PAGE_SHIFT);
- return lprocfs_read_frac_helper(page, count, pages_number, mult);
+ mult = 1 << (20 - CFS_PAGE_SHIFT);
+ return lprocfs_read_frac_helper(page, count, pages_number, mult);
}
static int ll_wr_max_readahead_per_file_mb(struct file *file, const char *buffer,
return -ERANGE;
}
- cfs_spin_lock(&sbi->ll_lock);
- sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
- cfs_spin_unlock(&sbi->ll_lock);
+ spin_lock(&sbi->ll_lock);
+ sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
+ spin_unlock(&sbi->ll_lock);
- return count;
+ return count;
}
static int ll_rd_max_read_ahead_whole_mb(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
- struct super_block *sb = data;
- struct ll_sb_info *sbi = ll_s2sbi(sb);
- long pages_number;
- int mult;
+ struct super_block *sb = data;
+ struct ll_sb_info *sbi = ll_s2sbi(sb);
+ long pages_number;
+ int mult;
- cfs_spin_lock(&sbi->ll_lock);
- pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
- cfs_spin_unlock(&sbi->ll_lock);
+ spin_lock(&sbi->ll_lock);
+ pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
+ spin_unlock(&sbi->ll_lock);
- mult = 1 << (20 - CFS_PAGE_SHIFT);
- return lprocfs_read_frac_helper(page, count, pages_number, mult);
+ mult = 1 << (20 - CFS_PAGE_SHIFT);
+ return lprocfs_read_frac_helper(page, count, pages_number, mult);
}
static int ll_wr_max_read_ahead_whole_mb(struct file *file, const char *buffer,
return -ERANGE;
}
- cfs_spin_lock(&sbi->ll_lock);
- sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
- cfs_spin_unlock(&sbi->ll_lock);
+ spin_lock(&sbi->ll_lock);
+ sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
+ spin_unlock(&sbi->ll_lock);
- return count;
+ return count;
}
static int ll_rd_max_cached_mb(char *page, char **start, off_t off,
if (sbi->ll_dt_exp == NULL)
RETURN(-ENODEV);
- cfs_spin_lock(&sbi->ll_lock);
+ spin_lock(&sbi->ll_lock);
diff = pages_number - cache->ccc_lru_max;
- cfs_spin_unlock(&sbi->ll_lock);
+ spin_unlock(&sbi->ll_lock);
/* easy - add more LRU slots. */
if (diff >= 0) {
out:
if (rc >= 0) {
- cfs_spin_lock(&sbi->ll_lock);
+ spin_lock(&sbi->ll_lock);
cache->ccc_lru_max = pages_number;
- cfs_spin_unlock(&sbi->ll_lock);
+ spin_unlock(&sbi->ll_lock);
rc = count;
} else {
cfs_atomic_add(nrpages, &cache->ccc_lru_left);
seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
"extents", "calls", "%", "cum%",
"calls", "%", "cum%");
- cfs_spin_lock(&sbi->ll_pp_extent_lock);
- for(k = 0; k < LL_PROCESS_HIST_MAX; k++) {
- if(io_extents->pp_extents[k].pid != 0) {
- seq_printf(seq, "\nPID: %d\n",
- io_extents->pp_extents[k].pid);
- ll_display_extents_info(io_extents, seq, k);
- }
- }
- cfs_spin_unlock(&sbi->ll_pp_extent_lock);
- return 0;
+ spin_lock(&sbi->ll_pp_extent_lock);
+ for (k = 0; k < LL_PROCESS_HIST_MAX; k++) {
+ if (io_extents->pp_extents[k].pid != 0) {
+ seq_printf(seq, "\nPID: %d\n",
+ io_extents->pp_extents[k].pid);
+ ll_display_extents_info(io_extents, seq, k);
+ }
+ }
+ spin_unlock(&sbi->ll_pp_extent_lock);
+ return 0;
}
static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
else
sbi->ll_rw_stats_on = 1;
- cfs_spin_lock(&sbi->ll_pp_extent_lock);
- for(i = 0; i < LL_PROCESS_HIST_MAX; i++) {
- io_extents->pp_extents[i].pid = 0;
- lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
- lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
- }
- cfs_spin_unlock(&sbi->ll_pp_extent_lock);
- return len;
+ spin_lock(&sbi->ll_pp_extent_lock);
+ for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
+ io_extents->pp_extents[i].pid = 0;
+ lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
+ lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
+ }
+ spin_unlock(&sbi->ll_pp_extent_lock);
+ return len;
}
LPROC_SEQ_FOPS(ll_rw_extents_stats_pp);
seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
"extents", "calls", "%", "cum%",
"calls", "%", "cum%");
- cfs_spin_lock(&sbi->ll_lock);
- ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
- cfs_spin_unlock(&sbi->ll_lock);
+ spin_lock(&sbi->ll_lock);
+ ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
+ spin_unlock(&sbi->ll_lock);
- return 0;
+ return 0;
}
static ssize_t ll_rw_extents_stats_seq_write(struct file *file, const char *buf,
sbi->ll_rw_stats_on = 0;
else
sbi->ll_rw_stats_on = 1;
- cfs_spin_lock(&sbi->ll_pp_extent_lock);
- for(i = 0; i <= LL_PROCESS_HIST_MAX; i++)
- {
- io_extents->pp_extents[i].pid = 0;
- lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
- lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
- }
- cfs_spin_unlock(&sbi->ll_pp_extent_lock);
+ spin_lock(&sbi->ll_pp_extent_lock);
+ for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
+ io_extents->pp_extents[i].pid = 0;
+ lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
+ lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
+ }
+ spin_unlock(&sbi->ll_pp_extent_lock);
- return len;
+ return len;
}
LPROC_SEQ_FOPS(ll_rw_extents_stats);
process = sbi->ll_rw_process_info;
offset = sbi->ll_rw_offset_info;
- cfs_spin_lock(&sbi->ll_pp_extent_lock);
+ spin_lock(&sbi->ll_pp_extent_lock);
/* Extent statistics */
for(i = 0; i < LL_PROCESS_HIST_MAX; i++) {
if(io_extents->pp_extents[i].pid == pid) {
io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
}
- cfs_spin_unlock(&sbi->ll_pp_extent_lock);
+ spin_unlock(&sbi->ll_pp_extent_lock);
- cfs_spin_lock(&sbi->ll_process_lock);
+ spin_lock(&sbi->ll_process_lock);
/* Offset statistics */
for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
if (process[i].rw_pid == pid) {
process[i].rw_largest_extent = count;
process[i].rw_offset = 0;
process[i].rw_last_file = file;
- cfs_spin_unlock(&sbi->ll_process_lock);
+ spin_unlock(&sbi->ll_process_lock);
return;
}
if (process[i].rw_last_file_pos != pos) {
if(process[i].rw_largest_extent < count)
process[i].rw_largest_extent = count;
process[i].rw_last_file_pos = pos + count;
- cfs_spin_unlock(&sbi->ll_process_lock);
+ spin_unlock(&sbi->ll_process_lock);
return;
}
}
process[*process_count].rw_largest_extent = count;
process[*process_count].rw_offset = 0;
process[*process_count].rw_last_file = file;
- cfs_spin_unlock(&sbi->ll_process_lock);
+ spin_unlock(&sbi->ll_process_lock);
}
static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
"then 0 or \"[D/d]isabled\" to deactivate\n");
return 0;
}
- cfs_spin_lock(&sbi->ll_process_lock);
+ spin_lock(&sbi->ll_process_lock);
seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
now.tv_sec, now.tv_usec);
(unsigned long)process[i].rw_largest_extent,
process[i].rw_offset);
}
- cfs_spin_unlock(&sbi->ll_process_lock);
+ spin_unlock(&sbi->ll_process_lock);
- return 0;
+ return 0;
}
static ssize_t ll_rw_offset_stats_seq_write(struct file *file, const char *buf,
else
sbi->ll_rw_stats_on = 1;
- cfs_spin_lock(&sbi->ll_process_lock);
- sbi->ll_offset_process_count = 0;
- sbi->ll_rw_offset_entry_count = 0;
- memset(process_info, 0, sizeof(struct ll_rw_process_info) *
- LL_PROCESS_HIST_MAX);
- memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
- LL_OFFSET_HIST_MAX);
- cfs_spin_unlock(&sbi->ll_process_lock);
+ spin_lock(&sbi->ll_process_lock);
+ sbi->ll_offset_process_count = 0;
+ sbi->ll_rw_offset_entry_count = 0;
+ memset(process_info, 0, sizeof(struct ll_rw_process_info) *
+ LL_PROCESS_HIST_MAX);
+ memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
+ LL_OFFSET_HIST_MAX);
+ spin_unlock(&sbi->ll_process_lock);
- return len;
+ return len;
}
LPROC_SEQ_FOPS(ll_rw_offset_stats);
head = lli->lli_remote_perms + remote_perm_hashfunc(cfs_curproc_uid());
- cfs_spin_lock(&lli->lli_lock);
+ spin_lock(&lli->lli_lock);
cfs_hlist_for_each_entry(lrp, node, head, lrp_list) {
if (lrp->lrp_uid != cfs_curproc_uid())
continue;
rc = ((lrp->lrp_access_perm & mask) == mask) ? 0 : -EACCES;
out:
- cfs_spin_unlock(&lli->lli_lock);
- return rc;
+ spin_unlock(&lli->lli_lock);
+ return rc;
}
int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm)
}
}
- cfs_spin_lock(&lli->lli_lock);
+ spin_lock(&lli->lli_lock);
if (!lli->lli_remote_perms)
lli->lli_remote_perms = perm_hash;
break;
}
- if (!lrp) {
- cfs_spin_unlock(&lli->lli_lock);
- lrp = alloc_ll_remote_perm();
- if (!lrp) {
- CERROR("alloc memory for ll_remote_perm failed!\n");
- RETURN(-ENOMEM);
- }
- cfs_spin_lock(&lli->lli_lock);
- goto again;
- }
+ if (!lrp) {
+ spin_unlock(&lli->lli_lock);
+ lrp = alloc_ll_remote_perm();
+ if (!lrp) {
+ CERROR("alloc memory for ll_remote_perm failed!\n");
+ RETURN(-ENOMEM);
+ }
+ spin_lock(&lli->lli_lock);
+ goto again;
+ }
lrp->lrp_access_perm = perm->rp_access_perm;
if (lrp != tmp) {
cfs_hlist_add_head(&lrp->lrp_list, head);
}
lli->lli_rmtperm_time = cfs_time_current();
- cfs_spin_unlock(&lli->lli_lock);
+ spin_unlock(&lli->lli_lock);
CDEBUG(D_SEC, "new remote perm@%p: %u/%u/%u/%u - %#x\n",
lrp, lrp->lrp_uid, lrp->lrp_gid, lrp->lrp_fsuid, lrp->lrp_fsgid,
cfs_might_sleep();
- cfs_mutex_lock(&lli->lli_rmtperm_mutex);
+ mutex_lock(&lli->lli_rmtperm_mutex);
/* check again */
if (save != lli->lli_rmtperm_time) {
rc = do_check_remote_perm(lli, mask);
if (!rc || (rc != -ENOENT && i)) {
- cfs_mutex_unlock(&lli->lli_rmtperm_mutex);
+ mutex_unlock(&lli->lli_rmtperm_mutex);
break;
}
}
ll_i2suppgid(inode), &req);
capa_put(oc);
if (rc) {
- cfs_mutex_unlock(&lli->lli_rmtperm_mutex);
+ mutex_unlock(&lli->lli_rmtperm_mutex);
break;
}
perm = req_capsule_server_swab_get(&req->rq_pill, &RMF_ACL,
lustre_swab_mdt_remote_perm);
if (unlikely(perm == NULL)) {
- cfs_mutex_unlock(&lli->lli_rmtperm_mutex);
+ mutex_unlock(&lli->lli_rmtperm_mutex);
rc = -EPROTO;
break;
}
rc = ll_update_remote_perm(inode, perm);
- cfs_mutex_unlock(&lli->lli_rmtperm_mutex);
+ mutex_unlock(&lli->lli_rmtperm_mutex);
if (rc == -ENOMEM)
break;
LASSERT(hash);
- cfs_spin_lock(&lli->lli_lock);
+ spin_lock(&lli->lli_lock);
- for (i = 0; i < REMOTE_PERM_HASHSIZE; i++) {
- cfs_hlist_for_each_entry_safe(lrp, node, next, hash + i,
- lrp_list)
- free_ll_remote_perm(lrp);
- }
+ for (i = 0; i < REMOTE_PERM_HASHSIZE; i++) {
+ cfs_hlist_for_each_entry_safe(lrp, node, next, hash + i,
+ lrp_list)
+ free_ll_remote_perm(lrp);
+ }
- cfs_spin_unlock(&lli->lli_lock);
+ spin_unlock(&lli->lli_lock);
}
#endif
void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
{
- struct ll_readahead_state *ras;
+ struct ll_readahead_state *ras;
- ras = ll_ras_get(f);
+ ras = ll_ras_get(f);
- cfs_spin_lock(&ras->ras_lock);
- ras->ras_requests++;
- ras->ras_request_index = 0;
- ras->ras_consecutive_requests++;
- rar->lrr_reader = current;
+ spin_lock(&ras->ras_lock);
+ ras->ras_requests++;
+ ras->ras_request_index = 0;
+ ras->ras_consecutive_requests++;
+ rar->lrr_reader = current;
- cfs_list_add(&rar->lrr_linkage, &ras->ras_read_beads);
- cfs_spin_unlock(&ras->ras_lock);
+ cfs_list_add(&rar->lrr_linkage, &ras->ras_read_beads);
+ spin_unlock(&ras->ras_lock);
}
void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
{
- struct ll_readahead_state *ras;
+ struct ll_readahead_state *ras;
- ras = ll_ras_get(f);
+ ras = ll_ras_get(f);
- cfs_spin_lock(&ras->ras_lock);
- cfs_list_del_init(&rar->lrr_linkage);
- cfs_spin_unlock(&ras->ras_lock);
+ spin_lock(&ras->ras_lock);
+ cfs_list_del_init(&rar->lrr_linkage);
+ spin_unlock(&ras->ras_lock);
}
static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
struct ll_ra_read *ll_ra_read_get(struct file *f)
{
- struct ll_readahead_state *ras;
- struct ll_ra_read *bead;
+ struct ll_readahead_state *ras;
+ struct ll_ra_read *bead;
- ras = ll_ras_get(f);
+ ras = ll_ras_get(f);
- cfs_spin_lock(&ras->ras_lock);
- bead = ll_ra_read_get_locked(ras);
- cfs_spin_unlock(&ras->ras_lock);
- return bead;
+ spin_lock(&ras->ras_lock);
+ bead = ll_ra_read_get_locked(ras);
+ spin_unlock(&ras->ras_lock);
+ return bead;
}
static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
RETURN(0);
}
- cfs_spin_lock(&ras->ras_lock);
+ spin_lock(&ras->ras_lock);
if (vio->cui_ra_window_set)
bead = &vio->cui_bead;
else
ria->ria_length = ras->ras_stride_length;
ria->ria_pages = ras->ras_stride_pages;
}
- cfs_spin_unlock(&ras->ras_lock);
+ spin_unlock(&ras->ras_lock);
if (end == 0) {
ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu \n",
ra_end, end, ria->ria_end);
- if (ra_end != end + 1) {
- cfs_spin_lock(&ras->ras_lock);
- if (ra_end < ras->ras_next_readahead &&
- index_in_window(ra_end, ras->ras_window_start, 0,
- ras->ras_window_len)) {
- ras->ras_next_readahead = ra_end;
- RAS_CDEBUG(ras);
- }
- cfs_spin_unlock(&ras->ras_lock);
- }
+ if (ra_end != end + 1) {
+ spin_lock(&ras->ras_lock);
+ if (ra_end < ras->ras_next_readahead &&
+ index_in_window(ra_end, ras->ras_window_start, 0,
+ ras->ras_window_len)) {
+ ras->ras_next_readahead = ra_end;
+ RAS_CDEBUG(ras);
+ }
+ spin_unlock(&ras->ras_lock);
+ }
- RETURN(ret);
+ RETURN(ret);
}
static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
- cfs_spin_lock_init(&ras->ras_lock);
- ras_reset(ras, 0);
- ras->ras_requests = 0;
- CFS_INIT_LIST_HEAD(&ras->ras_read_beads);
+ spin_lock_init(&ras->ras_lock);
+ ras_reset(ras, 0);
+ ras->ras_requests = 0;
+ CFS_INIT_LIST_HEAD(&ras->ras_read_beads);
}
/*
}
void ras_update(struct ll_sb_info *sbi, struct inode *inode,
- struct ll_readahead_state *ras, unsigned long index,
- unsigned hit)
+ struct ll_readahead_state *ras, unsigned long index,
+ unsigned hit)
{
- struct ll_ra_info *ra = &sbi->ll_ra_info;
- int zero = 0, stride_detect = 0, ra_miss = 0;
- ENTRY;
+ struct ll_ra_info *ra = &sbi->ll_ra_info;
+ int zero = 0, stride_detect = 0, ra_miss = 0;
+ ENTRY;
- cfs_spin_lock(&ras->ras_lock);
+ spin_lock(&ras->ras_lock);
ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS);
ras_increase_window(ras, ra, inode);
EXIT;
out_unlock:
- RAS_CDEBUG(ras);
- ras->ras_request_index++;
- cfs_spin_unlock(&ras->ras_lock);
- return;
+ RAS_CDEBUG(ras);
+ ras->ras_request_index++;
+ spin_unlock(&ras->ras_lock);
+ return;
}
int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
static inline void
ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
- int i = ll_sa_entry_hash(entry->se_qstr.hash);
+ int i = ll_sa_entry_hash(entry->se_qstr.hash);
- cfs_spin_lock(&sai->sai_cache_lock[i]);
- cfs_list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
- cfs_spin_unlock(&sai->sai_cache_lock[i]);
+ spin_lock(&sai->sai_cache_lock[i]);
+ cfs_list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
+ spin_unlock(&sai->sai_cache_lock[i]);
}
/*
static inline void
ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
- int i = ll_sa_entry_hash(entry->se_qstr.hash);
+ int i = ll_sa_entry_hash(entry->se_qstr.hash);
- cfs_spin_lock(&sai->sai_cache_lock[i]);
- cfs_list_del_init(&entry->se_hash);
- cfs_spin_unlock(&sai->sai_cache_lock[i]);
+ spin_lock(&sai->sai_cache_lock[i]);
+ cfs_list_del_init(&entry->se_hash);
+ spin_unlock(&sai->sai_cache_lock[i]);
}
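sai_cache_lock[] is a per-bucket spinlock array, so statahead entries hashed to different buckets never contend. A sketch of the same scheme; the names, bucket count and modulo hash below are stand-ins for ll_sa_entry_hash() and friends:

#include <linux/list.h>
#include <linux/spinlock.h>

#define DEMO_CACHE_SIZE 32

struct demo_cache {
	struct list_head	dc_bucket[DEMO_CACHE_SIZE];
	spinlock_t		dc_lock[DEMO_CACHE_SIZE];	/* one lock per bucket */
};

struct demo_entry {
	struct list_head	de_hash;
	unsigned int		de_key;
};

static void demo_cache_init(struct demo_cache *dc)
{
	int i;

	for (i = 0; i < DEMO_CACHE_SIZE; i++) {
		INIT_LIST_HEAD(&dc->dc_bucket[i]);
		spin_lock_init(&dc->dc_lock[i]);
	}
}

static void demo_cache_add(struct demo_cache *dc, struct demo_entry *de)
{
	unsigned int i = de->de_key % DEMO_CACHE_SIZE;

	spin_lock(&dc->dc_lock[i]);		/* contention is per bucket only */
	list_add_tail(&de->de_hash, &dc->dc_bucket[i]);
	spin_unlock(&dc->dc_lock[i]);
}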
static inline int agl_should_run(struct ll_statahead_info *sai,
entry->se_qstr.name = dname;
lli = ll_i2info(sai->sai_inode);
- cfs_spin_lock(&lli->lli_sa_lock);
- cfs_list_add_tail(&entry->se_list, &sai->sai_entries_sent);
- cfs_spin_unlock(&lli->lli_sa_lock);
+ spin_lock(&lli->lli_sa_lock);
+ cfs_list_add_tail(&entry->se_list, &sai->sai_entries_sent);
+ spin_unlock(&lli->lli_sa_lock);
- cfs_atomic_inc(&sai->sai_cache_count);
- ll_sa_entry_enhash(sai, entry);
+ cfs_atomic_inc(&sai->sai_cache_count);
+ ll_sa_entry_enhash(sai, entry);
- RETURN(entry);
+ RETURN(entry);
}
/*
static inline void
do_sai_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
- struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
+ struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
- ll_sa_entry_unhash(sai, entry);
+ ll_sa_entry_unhash(sai, entry);
- cfs_spin_lock(&lli->lli_sa_lock);
- entry->se_stat = SA_ENTRY_DEST;
- if (likely(!ll_sa_entry_unlinked(entry)))
- cfs_list_del_init(&entry->se_list);
- cfs_spin_unlock(&lli->lli_sa_lock);
+ spin_lock(&lli->lli_sa_lock);
+ entry->se_stat = SA_ENTRY_DEST;
+ if (likely(!ll_sa_entry_unlinked(entry)))
+ cfs_list_del_init(&entry->se_list);
+ spin_unlock(&lli->lli_sa_lock);
- ll_sa_entry_put(sai, entry);
+ ll_sa_entry_put(sai, entry);
}
/*
*/
static int
ll_sa_entry_to_stated(struct ll_statahead_info *sai,
- struct ll_sa_entry *entry, int rc)
+ struct ll_sa_entry *entry, int rc)
{
- struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
- int ret = 1;
+ struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
+ int ret = 1;
- ll_sa_entry_cleanup(sai, entry);
+ ll_sa_entry_cleanup(sai, entry);
- cfs_spin_lock(&lli->lli_sa_lock);
- if (likely(entry->se_stat != SA_ENTRY_DEST)) {
- do_sai_entry_to_stated(sai, entry, rc);
- ret = 0;
- }
- cfs_spin_unlock(&lli->lli_sa_lock);
+ spin_lock(&lli->lli_sa_lock);
+ if (likely(entry->se_stat != SA_ENTRY_DEST)) {
+ do_sai_entry_to_stated(sai, entry, rc);
+ ret = 0;
+ }
+ spin_unlock(&lli->lli_sa_lock);
- return ret;
+ return ret;
}
/*
static void ll_agl_add(struct ll_statahead_info *sai,
struct inode *inode, int index)
{
- struct ll_inode_info *child = ll_i2info(inode);
- struct ll_inode_info *parent = ll_i2info(sai->sai_inode);
- int added = 0;
-
- cfs_spin_lock(&child->lli_agl_lock);
- if (child->lli_agl_index == 0) {
- child->lli_agl_index = index;
- cfs_spin_unlock(&child->lli_agl_lock);
-
- LASSERT(cfs_list_empty(&child->lli_agl_list));
-
- igrab(inode);
- cfs_spin_lock(&parent->lli_agl_lock);
- if (agl_list_empty(sai))
- added = 1;
- cfs_list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
- cfs_spin_unlock(&parent->lli_agl_lock);
- } else {
- cfs_spin_unlock(&child->lli_agl_lock);
- }
-
- if (added > 0)
- cfs_waitq_signal(&sai->sai_agl_thread.t_ctl_waitq);
+ struct ll_inode_info *child = ll_i2info(inode);
+ struct ll_inode_info *parent = ll_i2info(sai->sai_inode);
+ int added = 0;
+
+ spin_lock(&child->lli_agl_lock);
+ if (child->lli_agl_index == 0) {
+ child->lli_agl_index = index;
+ spin_unlock(&child->lli_agl_lock);
+
+ LASSERT(cfs_list_empty(&child->lli_agl_list));
+
+ igrab(inode);
+ spin_lock(&parent->lli_agl_lock);
+ if (agl_list_empty(sai))
+ added = 1;
+ cfs_list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
+ spin_unlock(&parent->lli_agl_lock);
+ } else {
+ spin_unlock(&child->lli_agl_lock);
+ }
+
+ if (added > 0)
+ cfs_waitq_signal(&sai->sai_agl_thread.t_ctl_waitq);
}
static struct ll_statahead_info *ll_sai_alloc(void)
cfs_atomic_set(&sai->sai_refcount, 1);
- cfs_spin_lock(&sai_generation_lock);
- sai->sai_generation = ++sai_generation;
- if (unlikely(sai_generation == 0))
- sai->sai_generation = ++sai_generation;
- cfs_spin_unlock(&sai_generation_lock);
+ spin_lock(&sai_generation_lock);
+ sai->sai_generation = ++sai_generation;
+ if (unlikely(sai_generation == 0))
+ sai->sai_generation = ++sai_generation;
+ spin_unlock(&sai_generation_lock);
sai->sai_max = LL_SA_RPC_MIN;
sai->sai_index = 1;
for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
CFS_INIT_LIST_HEAD(&sai->sai_cache[i]);
- cfs_spin_lock_init(&sai->sai_cache_lock[i]);
+ spin_lock_init(&sai->sai_cache_lock[i]);
}
cfs_atomic_set(&sai->sai_cache_count, 0);
if (unlikely(cfs_atomic_read(&sai->sai_refcount) > 0)) {
/* It is race case, the interpret callback just hold
* a reference count */
- cfs_spin_unlock(&lli->lli_sa_lock);
- RETURN_EXIT;
- }
+ spin_unlock(&lli->lli_sa_lock);
+ RETURN_EXIT;
+ }
- LASSERT(lli->lli_opendir_key == NULL);
- LASSERT(thread_is_stopped(&sai->sai_thread));
- LASSERT(thread_is_stopped(&sai->sai_agl_thread));
+ LASSERT(lli->lli_opendir_key == NULL);
+ LASSERT(thread_is_stopped(&sai->sai_thread));
+ LASSERT(thread_is_stopped(&sai->sai_agl_thread));
- lli->lli_sai = NULL;
- lli->lli_opendir_pid = 0;
- cfs_spin_unlock(&lli->lli_sa_lock);
+ lli->lli_sai = NULL;
+ lli->lli_opendir_pid = 0;
+ spin_unlock(&lli->lli_sa_lock);
if (sai->sai_sent > sai->sai_replied)
CDEBUG(D_READA,"statahead for dir "DFID" does not "
}
/* Someone is in glimpse (sync or async), do nothing. */
- rc = cfs_down_write_trylock(&lli->lli_glimpse_sem);
+ rc = down_write_trylock(&lli->lli_glimpse_sem);
if (rc == 0) {
lli->lli_agl_index = 0;
iput(inode);
*/
if (lli->lli_glimpse_time != 0 &&
cfs_time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
- cfs_up_write(&lli->lli_glimpse_sem);
+ up_write(&lli->lli_glimpse_sem);
lli->lli_agl_index = 0;
iput(inode);
RETURN_EXIT;
cl_agl(inode);
lli->lli_agl_index = 0;
lli->lli_glimpse_time = cfs_time_current();
- cfs_up_write(&lli->lli_glimpse_sem);
+ up_write(&lli->lli_glimpse_sem);
CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
DFID", idx = "LPU64", rc = %d\n",
int rc = 0;
ENTRY;
- cfs_spin_lock(&lli->lli_sa_lock);
- if (target != NULL && target->se_req != NULL &&
- !cfs_list_empty(&target->se_list)) {
- entry = target;
- } else if (unlikely(sa_received_empty(sai))) {
- cfs_spin_unlock(&lli->lli_sa_lock);
- RETURN_EXIT;
- } else {
- entry = sa_first_received_entry(sai);
- }
-
- cfs_atomic_inc(&entry->se_refcount);
- cfs_list_del_init(&entry->se_list);
- cfs_spin_unlock(&lli->lli_sa_lock);
+ spin_lock(&lli->lli_sa_lock);
+ if (target != NULL && target->se_req != NULL &&
+ !cfs_list_empty(&target->se_list)) {
+ entry = target;
+ } else if (unlikely(sa_received_empty(sai))) {
+ spin_unlock(&lli->lli_sa_lock);
+ RETURN_EXIT;
+ } else {
+ entry = sa_first_received_entry(sai);
+ }
+
+ cfs_atomic_inc(&entry->se_refcount);
+ cfs_list_del_init(&entry->se_list);
+ spin_unlock(&lli->lli_sa_lock);
LASSERT(entry->se_handle != 0);
if (it_disposition(it, DISP_LOOKUP_NEG))
rc = -ENOENT;
- cfs_spin_lock(&lli->lli_sa_lock);
- /* stale entry */
- if (unlikely(lli->lli_sai == NULL ||
- lli->lli_sai->sai_generation != minfo->mi_generation)) {
- cfs_spin_unlock(&lli->lli_sa_lock);
- GOTO(out, rc = -ESTALE);
- } else {
- sai = ll_sai_get(lli->lli_sai);
- if (unlikely(!thread_is_running(&sai->sai_thread))) {
- sai->sai_replied++;
- cfs_spin_unlock(&lli->lli_sa_lock);
- GOTO(out, rc = -EBADFD);
- }
-
- entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata);
- if (entry == NULL) {
- sai->sai_replied++;
- cfs_spin_unlock(&lli->lli_sa_lock);
- GOTO(out, rc = -EIDRM);
- }
-
- cfs_list_del_init(&entry->se_list);
- if (rc != 0) {
- sai->sai_replied++;
- do_sai_entry_to_stated(sai, entry, rc);
- cfs_spin_unlock(&lli->lli_sa_lock);
+ spin_lock(&lli->lli_sa_lock);
+ /* stale entry */
+ if (unlikely(lli->lli_sai == NULL ||
+ lli->lli_sai->sai_generation != minfo->mi_generation)) {
+ spin_unlock(&lli->lli_sa_lock);
+ GOTO(out, rc = -ESTALE);
+ } else {
+ sai = ll_sai_get(lli->lli_sai);
+ if (unlikely(!thread_is_running(&sai->sai_thread))) {
+ sai->sai_replied++;
+ spin_unlock(&lli->lli_sa_lock);
+ GOTO(out, rc = -EBADFD);
+ }
+
+ entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata);
+ if (entry == NULL) {
+ sai->sai_replied++;
+ spin_unlock(&lli->lli_sa_lock);
+ GOTO(out, rc = -EIDRM);
+ }
+
+ cfs_list_del_init(&entry->se_list);
+ if (rc != 0) {
+ sai->sai_replied++;
+ do_sai_entry_to_stated(sai, entry, rc);
+ spin_unlock(&lli->lli_sa_lock);
if (entry->se_index == sai->sai_index_wait)
cfs_waitq_signal(&sai->sai_waitq);
} else {
cfs_list_add_tail(&entry->se_list,
&sai->sai_entries_received);
sai->sai_replied++;
- cfs_spin_unlock(&lli->lli_sa_lock);
+ spin_unlock(&lli->lli_sa_lock);
if (wakeup)
cfs_waitq_signal(&sai->sai_thread.t_ctl_waitq);
}
cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
atomic_inc(&sbi->ll_agl_total);
- cfs_spin_lock(&plli->lli_agl_lock);
- sai->sai_agl_valid = 1;
- thread_set_flags(thread, SVC_RUNNING);
- cfs_spin_unlock(&plli->lli_agl_lock);
+ spin_lock(&plli->lli_agl_lock);
+ sai->sai_agl_valid = 1;
+ thread_set_flags(thread, SVC_RUNNING);
+ spin_unlock(&plli->lli_agl_lock);
cfs_waitq_signal(&thread->t_ctl_waitq);
while (1) {
if (!thread_is_running(thread))
break;
- cfs_spin_lock(&plli->lli_agl_lock);
- /* The statahead thread maybe help to process AGL entries,
- * so check whether list empty again. */
- if (!agl_list_empty(sai)) {
- clli = agl_first_entry(sai);
- cfs_list_del_init(&clli->lli_agl_list);
- cfs_spin_unlock(&plli->lli_agl_lock);
- ll_agl_trigger(&clli->lli_vfs_inode, sai);
- } else {
- cfs_spin_unlock(&plli->lli_agl_lock);
- }
- }
-
- cfs_spin_lock(&plli->lli_agl_lock);
- sai->sai_agl_valid = 0;
- while (!agl_list_empty(sai)) {
- clli = agl_first_entry(sai);
- cfs_list_del_init(&clli->lli_agl_list);
- cfs_spin_unlock(&plli->lli_agl_lock);
- clli->lli_agl_index = 0;
- iput(&clli->lli_vfs_inode);
- cfs_spin_lock(&plli->lli_agl_lock);
- }
- thread_set_flags(thread, SVC_STOPPED);
- cfs_spin_unlock(&plli->lli_agl_lock);
- cfs_waitq_signal(&thread->t_ctl_waitq);
- ll_sai_put(sai);
- CDEBUG(D_READA, "agl thread stopped: [pid %d] [parent %.*s]\n",
- cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
- RETURN(0);
+ spin_lock(&plli->lli_agl_lock);
+ /* The statahead thread may help to process AGL entries,
+ * so check again whether the list is empty. */
+ if (!agl_list_empty(sai)) {
+ clli = agl_first_entry(sai);
+ cfs_list_del_init(&clli->lli_agl_list);
+ spin_unlock(&plli->lli_agl_lock);
+ ll_agl_trigger(&clli->lli_vfs_inode, sai);
+ } else {
+ spin_unlock(&plli->lli_agl_lock);
+ }
+ }
+
+ spin_lock(&plli->lli_agl_lock);
+ sai->sai_agl_valid = 0;
+ while (!agl_list_empty(sai)) {
+ clli = agl_first_entry(sai);
+ cfs_list_del_init(&clli->lli_agl_list);
+ spin_unlock(&plli->lli_agl_lock);
+ clli->lli_agl_index = 0;
+ iput(&clli->lli_vfs_inode);
+ spin_lock(&plli->lli_agl_lock);
+ }
+ thread_set_flags(thread, SVC_STOPPED);
+ spin_unlock(&plli->lli_agl_lock);
+ cfs_waitq_signal(&thread->t_ctl_waitq);
+ ll_sai_put(sai);
+ CDEBUG(D_READA, "agl thread stopped: [pid %d] [parent %.*s]\n",
+ cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
+ RETURN(0);
}
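Note: the hunks above only rename cfs_spin_lock()/cfs_spin_unlock() to the stock Linux spin_lock()/spin_unlock(); the drain pattern itself (pop one entry, drop the lock, process it, retake the lock) is unchanged. As a standalone reference for that pattern with the plain kernel primitives, here is a minimal sketch; struct item, item_lock and drain_list() are illustrative names, not part of the Lustre tree:

#include <linux/spinlock.h>
#include <linux/list.h>

struct item {
	struct list_head	it_link;
};

static DEFINE_SPINLOCK(item_lock);
static LIST_HEAD(item_list);

/* Pop entries one at a time; never hold the spinlock across process(). */
static void drain_list(void (*process)(struct item *))
{
	struct item *it;

	spin_lock(&item_lock);
	while (!list_empty(&item_list)) {
		it = list_first_entry(&item_list, struct item, it_link);
		list_del_init(&it->it_link);
		spin_unlock(&item_lock);
		process(it);
		spin_lock(&item_lock);
	}
	spin_unlock(&item_lock);
}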
static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
ll_start_agl(parent, sai);
atomic_inc(&sbi->ll_sa_total);
- cfs_spin_lock(&plli->lli_sa_lock);
- thread_set_flags(thread, SVC_RUNNING);
- cfs_spin_unlock(&plli->lli_sa_lock);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ spin_lock(&plli->lli_sa_lock);
+ thread_set_flags(thread, SVC_RUNNING);
+ spin_unlock(&plli->lli_sa_lock);
+ cfs_waitq_signal(&thread->t_ctl_waitq);
- ll_dir_chain_init(&chain);
+ ll_dir_chain_init(&chain);
page = ll_get_dir_page(dir, pos, &chain);
while (1) {
* some AGL entries to be triggered, then try to help
* to process the AGL entries. */
if (sa_sent_full(sai)) {
- cfs_spin_lock(&plli->lli_agl_lock);
- while (!agl_list_empty(sai)) {
- clli = agl_first_entry(sai);
- cfs_list_del_init(&clli->lli_agl_list);
- cfs_spin_unlock(&plli->lli_agl_lock);
+ spin_lock(&plli->lli_agl_lock);
+ while (!agl_list_empty(sai)) {
+ clli = agl_first_entry(sai);
+ cfs_list_del_init(&clli->lli_agl_list);
+ spin_unlock(&plli->lli_agl_lock);
ll_agl_trigger(&clli->lli_vfs_inode,
sai);
if (!sa_sent_full(sai))
goto do_it;
- cfs_spin_lock(&plli->lli_agl_lock);
- }
- cfs_spin_unlock(&plli->lli_agl_lock);
+ spin_lock(&plli->lli_agl_lock);
+ }
+ spin_unlock(&plli->lli_agl_lock);
goto keep_it;
}
break;
}
- cfs_spin_lock(&plli->lli_agl_lock);
- while (!agl_list_empty(sai) &&
- thread_is_running(thread)) {
- clli = agl_first_entry(sai);
- cfs_list_del_init(&clli->lli_agl_list);
- cfs_spin_unlock(&plli->lli_agl_lock);
- ll_agl_trigger(&clli->lli_vfs_inode, sai);
- cfs_spin_lock(&plli->lli_agl_lock);
- }
- cfs_spin_unlock(&plli->lli_agl_lock);
+ spin_lock(&plli->lli_agl_lock);
+ while (!agl_list_empty(sai) &&
+ thread_is_running(thread)) {
+ clli = agl_first_entry(sai);
+ cfs_list_del_init(&clli->lli_agl_list);
+ spin_unlock(&plli->lli_agl_lock);
+ ll_agl_trigger(&clli->lli_vfs_inode, sai);
+ spin_lock(&plli->lli_agl_lock);
+ }
+ spin_unlock(&plli->lli_agl_lock);
GOTO(out, rc = 0);
} else if (1) {
out:
if (sai->sai_agl_valid) {
- cfs_spin_lock(&plli->lli_agl_lock);
- thread_set_flags(agl_thread, SVC_STOPPING);
- cfs_spin_unlock(&plli->lli_agl_lock);
+ spin_lock(&plli->lli_agl_lock);
+ thread_set_flags(agl_thread, SVC_STOPPING);
+ spin_unlock(&plli->lli_agl_lock);
cfs_waitq_signal(&agl_thread->t_ctl_waitq);
CDEBUG(D_READA, "stop agl thread: [pid %d]\n",
thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
}
ll_dir_chain_fini(&chain);
- cfs_spin_lock(&plli->lli_sa_lock);
- if (!sa_received_empty(sai)) {
- thread_set_flags(thread, SVC_STOPPING);
- cfs_spin_unlock(&plli->lli_sa_lock);
-
- /* To release the resources held by received entries. */
- while (!sa_received_empty(sai))
- do_statahead_interpret(sai, NULL);
-
- cfs_spin_lock(&plli->lli_sa_lock);
- }
- thread_set_flags(thread, SVC_STOPPED);
- cfs_spin_unlock(&plli->lli_sa_lock);
+ spin_lock(&plli->lli_sa_lock);
+ if (!sa_received_empty(sai)) {
+ thread_set_flags(thread, SVC_STOPPING);
+ spin_unlock(&plli->lli_sa_lock);
+
+ /* To release the resources held by received entries. */
+ while (!sa_received_empty(sai))
+ do_statahead_interpret(sai, NULL);
+
+ spin_lock(&plli->lli_sa_lock);
+ }
+ thread_set_flags(thread, SVC_STOPPED);
+ spin_unlock(&plli->lli_sa_lock);
cfs_waitq_signal(&sai->sai_waitq);
cfs_waitq_signal(&thread->t_ctl_waitq);
ll_sai_put(sai);
*/
void ll_stop_statahead(struct inode *dir, void *key)
{
- struct ll_inode_info *lli = ll_i2info(dir);
+ struct ll_inode_info *lli = ll_i2info(dir);
- if (unlikely(key == NULL))
- return;
+ if (unlikely(key == NULL))
+ return;
- cfs_spin_lock(&lli->lli_sa_lock);
- if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
- cfs_spin_unlock(&lli->lli_sa_lock);
+ spin_lock(&lli->lli_sa_lock);
+ if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
+ spin_unlock(&lli->lli_sa_lock);
return;
}
if (!thread_is_stopped(thread)) {
thread_set_flags(thread, SVC_STOPPING);
- cfs_spin_unlock(&lli->lli_sa_lock);
- cfs_waitq_signal(&thread->t_ctl_waitq);
-
- CDEBUG(D_READA, "stop statahead thread: [pid %d]\n",
- cfs_curproc_pid());
- l_wait_event(thread->t_ctl_waitq,
- thread_is_stopped(thread),
- &lwi);
- } else {
- cfs_spin_unlock(&lli->lli_sa_lock);
- }
-
- /*
- * Put the ref which was held when first statahead_enter.
- * It maybe not the last ref for some statahead requests
- * maybe inflight.
- */
- ll_sai_put(lli->lli_sai);
- } else {
- lli->lli_opendir_pid = 0;
- cfs_spin_unlock(&lli->lli_sa_lock);
- }
+ spin_unlock(&lli->lli_sa_lock);
+ cfs_waitq_signal(&thread->t_ctl_waitq);
+
+ CDEBUG(D_READA, "stop statahead thread: [pid %d]\n",
+ cfs_curproc_pid());
+ l_wait_event(thread->t_ctl_waitq,
+ thread_is_stopped(thread),
+ &lwi);
+ } else {
+ spin_unlock(&lli->lli_sa_lock);
+ }
+
+ /*
+ * Put the ref which was held when statahead_enter() was first
+ * called. It may not be the last ref, since some statahead
+ * requests may still be in flight.
+ */
+ ll_sai_put(lli->lli_sai);
+ } else {
+ lli->lli_opendir_pid = 0;
+ spin_unlock(&lli->lli_sa_lock);
+ }
}
enum {
PFID(&lli->lli_fid), sai->sai_hit,
sai->sai_miss, sai->sai_sent,
sai->sai_replied, cfs_curproc_pid());
- cfs_spin_lock(&lli->lli_sa_lock);
- if (!thread_is_stopped(thread))
- thread_set_flags(thread, SVC_STOPPING);
- cfs_spin_unlock(&lli->lli_sa_lock);
- }
- }
+ spin_lock(&lli->lli_sa_lock);
+ if (!thread_is_stopped(thread))
+ thread_set_flags(thread, SVC_STOPPING);
+ spin_unlock(&lli->lli_sa_lock);
+ }
+ }
- if (!thread_is_stopped(thread))
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ if (!thread_is_stopped(thread))
+ cfs_waitq_signal(&thread->t_ctl_waitq);
- EXIT;
+ EXIT;
}
/**
out:
if (sai != NULL)
OBD_FREE_PTR(sai);
- cfs_spin_lock(&lli->lli_sa_lock);
- lli->lli_opendir_key = NULL;
- lli->lli_opendir_pid = 0;
- cfs_spin_unlock(&lli->lli_sa_lock);
- return rc;
+ spin_lock(&lli->lli_sa_lock);
+ lli->lli_opendir_key = NULL;
+ lli->lli_opendir_pid = 0;
+ spin_unlock(&lli->lli_sa_lock);
+ return rc;
}
/* got an object. Find next page. */
hdr = cl_object_header(clob);
- cfs_spin_lock(&hdr->coh_page_guard);
+ spin_lock(&hdr->coh_page_guard);
nr = radix_tree_gang_lookup(&hdr->coh_tree,
(void **)&pg,
id.vpi_index, 1);
/* Cant support over 16T file */
nr = !(pg->cp_index > 0xffffffff);
}
- cfs_spin_unlock(&hdr->coh_page_guard);
+ spin_unlock(&hdr->coh_page_guard);
lu_object_ref_del(&clob->co_lu, "dump", cfs_current());
cl_object_put(env, clob);
}
#define seq_page_flag(seq, page, flag, has_flags) do { \
- if (cfs_test_bit(PG_##flag, &(page)->flags)) { \
+ if (test_bit(PG_##flag, &(page)->flags)) { \
seq_printf(seq, "%s"#flag, has_flags ? "|" : ""); \
has_flags = 1; \
} \
if (clob != NULL) {
hdr = cl_object_header(clob);
- cfs_spin_lock(&hdr->coh_page_guard);
- page = cl_page_lookup(hdr, id.vpi_index);
- cfs_spin_unlock(&hdr->coh_page_guard);
+ spin_lock(&hdr->coh_page_guard);
+ page = cl_page_lookup(hdr, id.vpi_index);
+ spin_unlock(&hdr->coh_page_guard);
seq_printf(f, "%8x@"DFID": ",
id.vpi_index, PFID(&hdr->coh_lu.loh_fid));
struct ll_inode_info *lli = ll_i2info(inode);
struct posix_acl *acl;
- cfs_spin_lock(&lli->lli_lock);
- acl = posix_acl_dup(lli->lli_posix_acl);
- cfs_spin_unlock(&lli->lli_lock);
+ spin_lock(&lli->lli_lock);
+ acl = posix_acl_dup(lli->lli_posix_acl);
+ spin_unlock(&lli->lli_lock);
if (!acl)
RETURN(-ENODATA);
#define LMV_MAX_TGT_COUNT 128
-#define lmv_init_lock(lmv) cfs_mutex_lock(&lmv->init_mutex);
-#define lmv_init_unlock(lmv) cfs_mutex_unlock(&lmv->init_mutex);
+#define lmv_init_lock(lmv) mutex_lock(&lmv->init_mutex);
+#define lmv_init_unlock(lmv) mutex_unlock(&lmv->init_mutex);
#define LL_IT2STR(it) \
((it) ? ldlm_it2str((it)->it_op) : "0")
/**
* Sema for protecting fields.
*/
- cfs_mutex_t lo_guard;
+ struct mutex lo_guard;
/**
* Object state like O_FREEING.
*/
lmv_object_lock(struct lmv_object *obj)
{
LASSERT(obj);
- cfs_mutex_lock(&obj->lo_guard);
+ mutex_lock(&obj->lo_guard);
}
static inline void
lmv_object_unlock(struct lmv_object *obj)
{
LASSERT(obj);
- cfs_mutex_unlock(&obj->lo_guard);
+ mutex_unlock(&obj->lo_guard);
}
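The lmv_object guard above moves from cfs_mutex_t to the kernel's struct mutex, with cfs_mutex_lock()/cfs_mutex_unlock() becoming mutex_lock()/mutex_unlock(). A minimal standalone sketch of the replacement API, assuming nothing beyond <linux/mutex.h> (guarded_obj and its fields are made-up names, not Lustre's):

#include <linux/mutex.h>

struct guarded_obj {
	struct mutex	go_guard;	/* protects go_state */
	int		go_state;
};

static void guarded_obj_init(struct guarded_obj *obj)
{
	mutex_init(&obj->go_guard);
	obj->go_state = 0;
}

static void guarded_obj_set_state(struct guarded_obj *obj, int state)
{
	/* mutex_lock() may sleep, so it must not be used in atomic context */
	mutex_lock(&obj->go_guard);
	obj->go_state = state;
	mutex_unlock(&obj->go_guard);
}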
void lmv_object_add(struct lmv_object *obj);
CDEBUG(D_INFO, "Searching in lmv %p for uuid %s (activate=%d)\n",
lmv, uuid->uuid, activate);
- cfs_spin_lock(&lmv->lmv_lock);
+ spin_lock(&lmv->lmv_lock);
for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
if (tgt->ltd_exp == NULL)
continue;
EXIT;
out_lmv_lock:
- cfs_spin_unlock(&lmv->lmv_lock);
- return rc;
+ spin_unlock(&lmv->lmv_lock);
+ return rc;
}
static int lmv_set_mdc_data(struct lmv_obd *lmv, struct obd_uuid *uuid,
- struct obd_connect_data *data)
+ struct obd_connect_data *data)
{
- struct lmv_tgt_desc *tgt;
- int i;
- ENTRY;
+ struct lmv_tgt_desc *tgt;
+ int i;
+ ENTRY;
- LASSERT(data != NULL);
+ LASSERT(data != NULL);
- cfs_spin_lock(&lmv->lmv_lock);
- for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
- if (tgt->ltd_exp == NULL)
- continue;
+ spin_lock(&lmv->lmv_lock);
+ for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
+ if (tgt->ltd_exp == NULL)
+ continue;
- if (obd_uuid_equals(uuid, &tgt->ltd_uuid)) {
- lmv->datas[tgt->ltd_idx] = *data;
- break;
- }
- }
- cfs_spin_unlock(&lmv->lmv_lock);
- RETURN(0);
+ if (obd_uuid_equals(uuid, &tgt->ltd_uuid)) {
+ lmv->datas[tgt->ltd_idx] = *data;
+ break;
+ }
+ }
+ spin_unlock(&lmv->lmv_lock);
+ RETURN(0);
}
struct obd_uuid *lmv_get_uuid(struct obd_export *exp) {
RETURN(-EINVAL);
}
}
- cfs_spin_lock(&lmv->lmv_lock);
- tgt = lmv->tgts + lmv->desc.ld_tgt_count++;
- tgt->ltd_uuid = *tgt_uuid;
- cfs_spin_unlock(&lmv->lmv_lock);
-
- if (lmv->connected) {
- rc = lmv_connect_mdc(obd, tgt);
- if (rc) {
- cfs_spin_lock(&lmv->lmv_lock);
- lmv->desc.ld_tgt_count--;
- memset(tgt, 0, sizeof(*tgt));
- cfs_spin_unlock(&lmv->lmv_lock);
+ spin_lock(&lmv->lmv_lock);
+ tgt = lmv->tgts + lmv->desc.ld_tgt_count++;
+ tgt->ltd_uuid = *tgt_uuid;
+ spin_unlock(&lmv->lmv_lock);
+
+ if (lmv->connected) {
+ rc = lmv_connect_mdc(obd, tgt);
+ if (rc) {
+ spin_lock(&lmv->lmv_lock);
+ lmv->desc.ld_tgt_count--;
+ memset(tgt, 0, sizeof(*tgt));
+ spin_unlock(&lmv->lmv_lock);
} else {
int easize = sizeof(struct lmv_stripe_md) +
lmv->desc.ld_tgt_count *
* New seq alloc and FLD setup should be atomic. Otherwise we may find
* on server that seq in new allocated fid is not yet known.
*/
- cfs_mutex_lock(&tgt->ltd_fid_mutex);
+ mutex_lock(&tgt->ltd_fid_mutex);
if (!tgt->ltd_active)
GOTO(out, rc = -ENODEV);
EXIT;
out:
- cfs_mutex_unlock(&tgt->ltd_fid_mutex);
+ mutex_unlock(&tgt->ltd_fid_mutex);
return rc;
}
RETURN(-ENOMEM);
for (i = 0; i < LMV_MAX_TGT_COUNT; i++) {
- cfs_mutex_init(&lmv->tgts[i].ltd_fid_mutex);
+ mutex_init(&lmv->tgts[i].ltd_fid_mutex);
lmv->tgts[i].ltd_idx = i;
}
lmv->max_easize = 0;
lmv->lmv_placement = PLACEMENT_CHAR_POLICY;
- cfs_spin_lock_init(&lmv->lmv_lock);
- cfs_mutex_init(&lmv->init_mutex);
+ spin_lock_init(&lmv->lmv_lock);
+ mutex_init(&lmv->init_mutex);
rc = lmv_object_setup(obd);
if (rc) {
obj->lo_state = 0;
obj->lo_hashtype = mea->mea_magic;
- cfs_mutex_init(&obj->lo_guard);
+ mutex_init(&obj->lo_guard);
cfs_atomic_set(&obj->lo_count, 0);
obj->lo_objcount = mea->mea_count;
void lmv_object_add(struct lmv_object *obj)
{
- cfs_spin_lock(&obj_list_lock);
- __lmv_object_add(obj);
- cfs_spin_unlock(&obj_list_lock);
+ spin_lock(&obj_list_lock);
+ __lmv_object_add(obj);
+ spin_unlock(&obj_list_lock);
}
static void __lmv_object_del(struct lmv_object *obj)
void lmv_object_del(struct lmv_object *obj)
{
- cfs_spin_lock(&obj_list_lock);
- __lmv_object_del(obj);
- cfs_spin_unlock(&obj_list_lock);
+ spin_lock(&obj_list_lock);
+ __lmv_object_del(obj);
+ spin_unlock(&obj_list_lock);
}
static struct lmv_object *__lmv_object_get(struct lmv_object *obj)
struct lmv_object *lmv_object_get(struct lmv_object *obj)
{
- cfs_spin_lock(&obj_list_lock);
- __lmv_object_get(obj);
- cfs_spin_unlock(&obj_list_lock);
- return obj;
+ spin_lock(&obj_list_lock);
+ __lmv_object_get(obj);
+ spin_unlock(&obj_list_lock);
+ return obj;
}
static void __lmv_object_put(struct lmv_object *obj)
void lmv_object_put(struct lmv_object *obj)
{
- cfs_spin_lock(&obj_list_lock);
- __lmv_object_put(obj);
- cfs_spin_unlock(&obj_list_lock);
+ spin_lock(&obj_list_lock);
+ __lmv_object_put(obj);
+ spin_unlock(&obj_list_lock);
}
void lmv_object_put_unlock(struct lmv_object *obj)
}
struct lmv_object *lmv_object_find(struct obd_device *obd,
- const struct lu_fid *fid)
+ const struct lu_fid *fid)
{
- struct lmv_obd *lmv = &obd->u.lmv;
- struct lmv_object *obj = NULL;
- ENTRY;
-
- /* For single MDT case, lmv_object list is always empty. */
- if (lmv->desc.ld_tgt_count > 1) {
- cfs_spin_lock(&obj_list_lock);
- obj = __lmv_object_find(obd, fid);
- cfs_spin_unlock(&obj_list_lock);
- }
-
- RETURN(obj);
+ struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_object *obj = NULL;
+ ENTRY;
+
+ /* For single MDT case, lmv_object list is always empty. */
+ if (lmv->desc.ld_tgt_count > 1) {
+ spin_lock(&obj_list_lock);
+ obj = __lmv_object_find(obd, fid);
+ spin_unlock(&obj_list_lock);
+ }
+
+ RETURN(obj);
}
struct lmv_object *lmv_object_find_lock(struct obd_device *obd,
if (!new)
RETURN(NULL);
- /*
- * Check if someone created it already while we were dealing with
- * allocating @obj.
- */
- cfs_spin_lock(&obj_list_lock);
- obj = __lmv_object_find(obd, fid);
- if (obj) {
- /*
- * Someone created it already - put @obj and getting out.
- */
- cfs_spin_unlock(&obj_list_lock);
- lmv_object_free(new);
- RETURN(obj);
- }
-
- __lmv_object_add(new);
- __lmv_object_get(new);
-
- cfs_spin_unlock(&obj_list_lock);
-
- CDEBUG(D_INODE, "New obj in lmv cache: "DFID"\n",
- PFID(fid));
-
- RETURN(new);
+ /*
+ * Check if someone created it already while we were dealing with
+ * allocating @obj.
+ */
+ spin_lock(&obj_list_lock);
+ obj = __lmv_object_find(obd, fid);
+ if (obj) {
+ /*
+ * Someone created it already - put @obj and get out.
+ */
+ spin_unlock(&obj_list_lock);
+ lmv_object_free(new);
+ RETURN(obj);
+ }
+
+ __lmv_object_add(new);
+ __lmv_object_get(new);
+
+ spin_unlock(&obj_list_lock);
+
+ CDEBUG(D_INODE, "New obj in lmv cache: "DFID"\n", PFID(fid));
+
+ RETURN(new);
}
struct lmv_object *lmv_object_create(struct obd_export *exp,
int lmv_object_delete(struct obd_export *exp, const struct lu_fid *fid)
{
- struct obd_device *obd = exp->exp_obd;
- struct lmv_object *obj;
- int rc = 0;
- ENTRY;
-
- cfs_spin_lock(&obj_list_lock);
- obj = __lmv_object_find(obd, fid);
- if (obj) {
- obj->lo_state |= O_FREEING;
- __lmv_object_put(obj);
- __lmv_object_put(obj);
- rc = 1;
- }
- cfs_spin_unlock(&obj_list_lock);
- RETURN(rc);
+ struct obd_device *obd = exp->exp_obd;
+ struct lmv_object *obj;
+ int rc = 0;
+ ENTRY;
+
+ spin_lock(&obj_list_lock);
+ obj = __lmv_object_find(obd, fid);
+ if (obj) {
+ obj->lo_state |= O_FREEING;
+ __lmv_object_put(obj);
+ __lmv_object_put(obj);
+ rc = 1;
+ }
+ spin_unlock(&obj_list_lock);
+ RETURN(rc);
}
int lmv_object_setup(struct obd_device *obd)
CDEBUG(D_INFO, "LMV object manager cleanup (%s)\n",
obd->obd_uuid.uuid);
- cfs_spin_lock(&obj_list_lock);
+ spin_lock(&obj_list_lock);
cfs_list_for_each_safe(cur, tmp, &obj_list) {
obj = cfs_list_entry(cur, struct lmv_object, lo_list);
}
__lmv_object_put(obj);
}
- cfs_spin_unlock(&obj_list_lock);
- EXIT;
+ spin_unlock(&obj_list_lock);
+ EXIT;
}
policy = placement_name2policy(dummy, len);
if (policy != PLACEMENT_INVAL_POLICY) {
- cfs_spin_lock(&lmv->lmv_lock);
- lmv->lmv_placement = policy;
- cfs_spin_unlock(&lmv->lmv_lock);
+ spin_lock(&lmv->lmv_lock);
+ lmv->lmv_placement = policy;
+ spin_unlock(&lmv->lmv_lock);
} else {
CERROR("Invalid placement policy \"%s\"!\n", dummy);
return -EINVAL;
}
}
- cfs_mutex_init(&lod->lod_mutex);
- cfs_init_rwsem(&lod->lod_rw_sem);
- cfs_spin_lock_init(&lod->lod_desc_lock);
+ mutex_init(&lod->lod_mutex);
+ init_rwsem(&lod->lod_rw_sem);
+ spin_lock_init(&lod->lod_desc_lock);
RETURN(0);
*exp = class_conn2export(&conn);
- cfs_mutex_lock(&lod->lod_mutex);
+ mutex_lock(&lod->lod_mutex);
lod->lod_connects++;
/* at the moment we expect the only user */
LASSERT(lod->lod_connects == 1);
- cfs_mutex_unlock(&lod->lod_mutex);
+ mutex_unlock(&lod->lod_mutex);
RETURN(0);
}
ENTRY;
/* Only disconnect the underlying layers on the final disconnect. */
- cfs_mutex_lock(&lod->lod_mutex);
+ mutex_lock(&lod->lod_mutex);
lod->lod_connects--;
if (lod->lod_connects != 0) {
/* why should there be more than 1 connect? */
- cfs_mutex_unlock(&lod->lod_mutex);
+ mutex_unlock(&lod->lod_mutex);
CERROR("%s: disconnect #%d\n", exp->exp_obd->obd_name,
lod->lod_connects);
goto out;
}
- cfs_mutex_unlock(&lod->lod_mutex);
+ mutex_unlock(&lod->lod_mutex);
/* the last user of lod has gone, let's release the device */
release = 1;
struct lov_desc lod_desc;
/* use to protect ld_active_tgt_count and all ltd_active */
- cfs_spinlock_t lod_desc_lock;
+ spinlock_t lod_desc_lock;
/* list of known OSTs */
struct lod_ost_desc_idx *lod_ost_idx[OST_PTRS];
/* Table refcount used for delayed deletion */
int lod_refcount;
/* mutex to serialize concurrent updates to the ost table */
- cfs_mutex_t lod_mutex;
+ struct mutex lod_mutex;
/* read/write semaphore used for array relocation */
- cfs_rw_semaphore_t lod_rw_sem;
+ struct rw_semaphore lod_rw_sem;
/* QoS info per LOD */
struct lov_qos lod_qos; /* qos info per lod */
*/
void lod_getref(struct lod_device *lod)
{
- cfs_down_read(&lod->lod_rw_sem);
- cfs_mutex_lock(&lod->lod_mutex);
+ down_read(&lod->lod_rw_sem);
+ mutex_lock(&lod->lod_mutex);
lod->lod_refcount++;
- cfs_mutex_unlock(&lod->lod_mutex);
+ mutex_unlock(&lod->lod_mutex);
}
/*
*/
void lod_putref(struct lod_device *lod)
{
- cfs_mutex_lock(&lod->lod_mutex);
+ mutex_lock(&lod->lod_mutex);
lod->lod_refcount--;
if (lod->lod_refcount == 0 && lod->lod_death_row) {
struct lod_ost_desc *ost_desc, *tmp;
lod->lod_desc.ld_active_tgt_count--;
lod->lod_death_row--;
}
- cfs_mutex_unlock(&lod->lod_mutex);
- cfs_up_read(&lod->lod_rw_sem);
+ mutex_unlock(&lod->lod_mutex);
+ up_read(&lod->lod_rw_sem);
cfs_list_for_each_entry_safe(ost_desc, tmp, &kill, ltd_kill) {
int rc;
OBD_FREE_PTR(ost_desc);
}
} else {
- cfs_mutex_unlock(&lod->lod_mutex);
- cfs_up_read(&lod->lod_rw_sem);
+ mutex_unlock(&lod->lod_mutex);
+ up_read(&lod->lod_rw_sem);
}
}
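lod_getref()/lod_putref() above keep their structure: a shared (read) hold on lod_rw_sem pins the OST array in place while lod_mutex serializes the refcount; only the cfs_ prefixes disappear. A standalone sketch of that get/put pattern with the native primitives, using invented names (table_dev, td_*) rather than the real lod_device fields:

#include <linux/mutex.h>
#include <linux/rwsem.h>

struct table_dev {
	struct rw_semaphore	td_rw_sem;	/* held for write to relocate the array */
	struct mutex		td_mutex;	/* protects td_refcount */
	int			td_refcount;
};

static void table_getref(struct table_dev *d)
{
	down_read(&d->td_rw_sem);	/* block concurrent array relocation */
	mutex_lock(&d->td_mutex);
	d->td_refcount++;
	mutex_unlock(&d->td_mutex);
}

static void table_putref(struct table_dev *d)
{
	mutex_lock(&d->td_mutex);
	d->td_refcount--;
	mutex_unlock(&d->td_mutex);
	up_read(&d->td_rw_sem);
}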
/* grab write reference on the lod. Relocating the array requires
* exclusive access */
- cfs_down_write(&lod->lod_rw_sem);
+ down_write(&lod->lod_rw_sem);
if (newsize <= lod->lod_osts_size)
/* someone else has already resize the array */
EXIT;
out:
- cfs_up_write(&lod->lod_rw_sem);
+ up_write(&lod->lod_rw_sem);
return rc;
}
lod_getref(lod);
}
- cfs_mutex_lock(&lod->lod_mutex);
+ mutex_lock(&lod->lod_mutex);
if (cfs_bitmap_check(lod->lod_ost_bitmap, index)) {
CERROR("%s: device %d is registered already\n", obd->obd_name,
index);
OST_TGT(lod, index) = ost_desc;
cfs_bitmap_set(lod->lod_ost_bitmap, index);
lod->lod_ostnr++;
- cfs_mutex_unlock(&lod->lod_mutex);
+ mutex_unlock(&lod->lod_mutex);
lod_putref(lod);
if (lod->lod_recovery_completed)
out_pool:
lod_ost_pool_remove(&lod->lod_pool_info, index);
out_mutex:
- cfs_mutex_unlock(&lod->lod_mutex);
+ mutex_unlock(&lod->lod_mutex);
lod_putref(lod);
out_desc:
OBD_FREE_PTR(ost_desc);
obd_str2uuid(&uuid, osp);
lod_getref(lod);
- cfs_mutex_lock(&lod->lod_mutex);
+ mutex_lock(&lod->lod_mutex);
/* check that the index is allocated in the bitmap */
if (!cfs_bitmap_check(lod->lod_ost_bitmap, idx) || !OST_TGT(lod,idx)) {
CERROR("%s: device %d is not set up\n", obd->obd_name, idx);
__lod_del_device(lod, idx);
EXIT;
out:
- cfs_mutex_unlock(&lod->lod_mutex);
+ mutex_unlock(&lod->lod_mutex);
lod_putref(lod);
return(rc);
}
/* Set up allocation policy (QoS and RR) */
CFS_INIT_LIST_HEAD(&lod->lod_qos.lq_oss_list);
- cfs_init_rwsem(&lod->lod_qos.lq_rw_sem);
+ init_rwsem(&lod->lod_qos.lq_rw_sem);
lod->lod_qos.lq_dirty = 1;
lod->lod_qos.lq_rr.lqr_dirty = 1;
lod->lod_qos.lq_reset = 1;
if (lod->lod_osts_size > 0) {
int idx;
lod_getref(lod);
- cfs_mutex_lock(&lod->lod_mutex);
+ mutex_lock(&lod->lod_mutex);
cfs_foreach_bit(lod->lod_ost_bitmap, idx)
__lod_del_device(lod, idx);
- cfs_mutex_unlock(&lod->lod_mutex);
+ mutex_unlock(&lod->lod_mutex);
lod_putref(lod);
CFS_FREE_BITMAP(lod->lod_ost_bitmap);
for (idx = 0; idx < OST_PTRS; idx++) {
/* iterate to find a non empty entry */
prev_idx = iter->idx;
- cfs_down_read(&pool_tgt_rw_sem(iter->pool));
+ down_read(&pool_tgt_rw_sem(iter->pool));
iter->idx++;
if (iter->idx == pool_tgt_count(iter->pool)) {
iter->idx = prev_idx; /* we stay on the last entry */
- cfs_up_read(&pool_tgt_rw_sem(iter->pool));
+ up_read(&pool_tgt_rw_sem(iter->pool));
return NULL;
}
- cfs_up_read(&pool_tgt_rw_sem(iter->pool));
+ up_read(&pool_tgt_rw_sem(iter->pool));
(*pos)++;
/* return != NULL to continue */
return iter;
LASSERT(iter->pool != NULL);
LASSERT(iter->idx <= pool_tgt_count(iter->pool));
- cfs_down_read(&pool_tgt_rw_sem(iter->pool));
+ down_read(&pool_tgt_rw_sem(iter->pool));
osc_desc = pool_tgt(iter->pool, iter->idx);
- cfs_up_read(&pool_tgt_rw_sem(iter->pool));
+ up_read(&pool_tgt_rw_sem(iter->pool));
if (osc_desc)
seq_printf(s, "%s\n", obd_uuid2str(&(osc_desc->ltd_uuid)));
CDEBUG(level, "pool "LOV_POOLNAMEF" has %d members\n",
pool->pool_name, pool->pool_obds.op_count);
- cfs_down_read(&pool_tgt_rw_sem(pool));
+ down_read(&pool_tgt_rw_sem(pool));
for (i = 0; i < pool_tgt_count(pool) ; i++) {
if (!pool_tgt(pool, i) || !(pool_tgt(pool, i))->ltd_exp)
obd_uuid2str(&((pool_tgt(pool, i))->ltd_uuid)));
}
- cfs_up_read(&pool_tgt_rw_sem(pool));
+ up_read(&pool_tgt_rw_sem(pool));
lod_pool_putref(pool);
}
count = LOD_POOL_INIT_COUNT;
op->op_array = NULL;
op->op_count = 0;
- cfs_init_rwsem(&op->op_rw_sem);
+ init_rwsem(&op->op_rw_sem);
op->op_size = count;
OBD_ALLOC(op->op_array, op->op_size * sizeof(op->op_array[0]));
if (op->op_array == NULL) {
int rc = 0, i;
ENTRY;
- cfs_down_write(&op->op_rw_sem);
+ down_write(&op->op_rw_sem);
rc = lod_ost_pool_extend(op, min_count);
if (rc)
op->op_count++;
EXIT;
out:
- cfs_up_write(&op->op_rw_sem);
+ up_write(&op->op_rw_sem);
return rc;
}
int i;
ENTRY;
- cfs_down_write(&op->op_rw_sem);
+ down_write(&op->op_rw_sem);
for (i = 0; i < op->op_count; i++) {
if (op->op_array[i] == idx) {
memmove(&op->op_array[i], &op->op_array[i + 1],
(op->op_count - i - 1) * sizeof(op->op_array[0]));
op->op_count--;
- cfs_up_write(&op->op_rw_sem);
+ up_write(&op->op_rw_sem);
EXIT;
return 0;
}
}
- cfs_up_write(&op->op_rw_sem);
+ up_write(&op->op_rw_sem);
RETURN(-EINVAL);
}
if (op->op_size == 0)
RETURN(0);
- cfs_down_write(&op->op_rw_sem);
+ down_write(&op->op_rw_sem);
OBD_FREE(op->op_array, op->op_size * sizeof(op->op_array[0]));
op->op_array = NULL;
op->op_count = 0;
op->op_size = 0;
- cfs_up_write(&op->op_rw_sem);
+ up_write(&op->op_rw_sem);
RETURN(0);
}
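The pool code above uses the rw_semaphore in the usual way: down_read()/up_read() around lookups and iteration, down_write()/up_write() around add/remove/free; only the names lose their cfs_ prefix. For reference, a minimal standalone reader/writer sketch (table, table_count and the function names are illustrative):

#include <linux/rwsem.h>

#define TABLE_SIZE 16

static DECLARE_RWSEM(table_rwsem);
static int table[TABLE_SIZE];
static int table_count;

/* Readers may run concurrently; they only inspect the array. */
static int table_lookup(int idx)
{
	int val = -1;

	down_read(&table_rwsem);
	if (idx < table_count)
		val = table[idx];
	up_read(&table_rwsem);
	return val;
}

/* Modifications take the exclusive (write) side. */
static int table_append(int val)
{
	int rc = -1;

	down_write(&table_rwsem);
	if (table_count < TABLE_SIZE) {
		table[table_count++] = val;
		rc = 0;
	}
	up_write(&table_rwsem);
	return rc;
}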
CDEBUG(D_INFO, "pool %p - proc %p\n", new_pool, new_pool->pool_proc_entry);
#endif
- cfs_spin_lock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_dev_lock);
cfs_list_add_tail(&new_pool->pool_list, &lod->lod_pool_list);
lod->lod_pool_count++;
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_unlock(&obd->obd_dev_lock);
/* add to find only when it fully ready */
rc = cfs_hash_add_unique(lod->lod_pools_hash_body, poolname,
RETURN(0);
out_err:
- cfs_spin_lock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_dev_lock);
cfs_list_del_init(&new_pool->pool_list);
lod->lod_pool_count--;
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_unlock(&obd->obd_dev_lock);
lprocfs_remove(&new_pool->pool_proc_entry);
lod_pool_putref(pool);
}
- cfs_spin_lock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_dev_lock);
cfs_list_del_init(&pool->pool_list);
lod->lod_pool_count--;
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_unlock(&obd->obd_dev_lock);
/* release last reference */
lod_pool_putref(pool);
*/
lod_pool_getref(pool);
- cfs_down_read(&pool_tgt_rw_sem(pool));
+ down_read(&pool_tgt_rw_sem(pool));
for (i = 0; i < pool_tgt_count(pool); i++) {
if (pool_tgt_array(pool)[i] == idx)
rc = -ENOENT;
EXIT;
out:
- cfs_up_read(&pool_tgt_rw_sem(pool));
+ up_read(&pool_tgt_rw_sem(pool));
lod_pool_putref(pool);
return rc;
cfs_list_t *list;
ENTRY;
- cfs_down_write(&lod->lod_qos.lq_rw_sem);
+ down_write(&lod->lod_qos.lq_rw_sem);
/*
* a bit hacky approach to learn NID of corresponding connection
* but there is no official API to access information like this
lod->lod_qos.lq_rr.lqr_dirty = 1;
out:
- cfs_up_write(&lod->lod_qos.lq_rw_sem);
+ up_write(&lod->lod_qos.lq_rw_sem);
RETURN(rc);
}
int rc = 0;
ENTRY;
- cfs_down_write(&lod->lod_qos.lq_rw_sem);
+ down_write(&lod->lod_qos.lq_rw_sem);
oss = ost_desc->ltd_qos.ltq_oss;
if (!oss)
GOTO(out, rc = -ENOENT);
lod->lod_qos.lq_dirty = 1;
lod->lod_qos.lq_rr.lqr_dirty = 1;
out:
- cfs_up_write(&lod->lod_qos.lq_rw_sem);
+ up_write(&lod->lod_qos.lq_rw_sem);
RETURN(rc);
}
/* check whether device has changed state (active, inactive) */
if (rc != 0 && ost->ltd_active) {
/* turned inactive? */
- cfs_spin_lock(&d->lod_desc_lock);
+ spin_lock(&d->lod_desc_lock);
if (ost->ltd_active) {
ost->ltd_active = 0;
LASSERT(d->lod_desc.ld_active_tgt_count > 0);
CDEBUG(D_CONFIG, "%s: turns inactive\n",
ost->ltd_exp->exp_obd->obd_name);
}
- cfs_spin_unlock(&d->lod_desc_lock);
+ spin_unlock(&d->lod_desc_lock);
} else if (rc == 0 && ost->ltd_active == 0) {
/* turned active? */
LASSERT(d->lod_desc.ld_active_tgt_count < d->lod_ostnr);
- cfs_spin_lock(&d->lod_desc_lock);
+ spin_lock(&d->lod_desc_lock);
if (ost->ltd_active == 0) {
ost->ltd_active = 1;
d->lod_desc.ld_active_tgt_count++;
CDEBUG(D_CONFIG, "%s: turns active\n",
ost->ltd_exp->exp_obd->obd_name);
}
- cfs_spin_unlock(&d->lod_desc_lock);
+ spin_unlock(&d->lod_desc_lock);
}
return rc;
/* statfs data are quite recent, don't need to refresh it */
RETURN_EXIT;
- cfs_down_write(&lod->lod_qos.lq_rw_sem);
+ down_write(&lod->lod_qos.lq_rw_sem);
if (cfs_time_beforeq_64(max_age, obd->obd_osfs_age))
GOTO(out, rc = 0);
obd->obd_osfs_age = cfs_time_current_64();
out:
- cfs_up_write(&lod->lod_qos.lq_rw_sem);
+ up_write(&lod->lod_qos.lq_rw_sem);
}
/* Recalculate per-object penalties for OSSs and OSTs,
}
/* Do actual allocation. */
- cfs_down_write(&lod->lod_qos.lq_rw_sem);
+ down_write(&lod->lod_qos.lq_rw_sem);
/*
* Check again. While we were sleeping on @lq_rw_sem something could
*/
if (!lqr->lqr_dirty) {
LASSERT(lqr->lqr_pool.op_size);
- cfs_up_write(&lod->lod_qos.lq_rw_sem);
+ up_write(&lod->lod_qos.lq_rw_sem);
RETURN(0);
}
lqr->lqr_pool.op_count = real_count;
rc = lod_ost_pool_extend(&lqr->lqr_pool, real_count);
if (rc) {
- cfs_up_write(&lod->lod_qos.lq_rw_sem);
+ up_write(&lod->lod_qos.lq_rw_sem);
RETURN(rc);
}
for (i = 0; i < lqr->lqr_pool.op_count; i++)
}
lqr->lqr_dirty = 0;
- cfs_up_write(&lod->lod_qos.lq_rw_sem);
+ up_write(&lod->lod_qos.lq_rw_sem);
if (placed != real_count) {
/* This should never happen */
/* the minimum of 0.1% used blocks and 1GB bytes. */
used = min_t(__u64, (msfs->os_blocks - msfs->os_bfree) >> 10,
- 1 << (31 - cfs_ffs(bs)));
+ 1 << (31 - ffs(bs)));
return (msfs->os_bavail < used);
}
pool = lod_find_pool(m, lo->ldo_pool);
if (pool != NULL) {
- cfs_down_read(&pool_tgt_rw_sem(pool));
+ down_read(&pool_tgt_rw_sem(pool));
osts = &(pool->pool_obds);
lqr = &(pool->pool_rr);
} else {
if (stripe_cnt > 1 && (osts->op_count % stripe_cnt) != 1)
++lqr->lqr_offset_idx;
}
- cfs_down_read(&m->lod_qos.lq_rw_sem);
+ down_read(&m->lod_qos.lq_rw_sem);
ost_start_idx_temp = lqr->lqr_start_idx;
repeat_find:
goto repeat_find;
}
- cfs_up_read(&m->lod_qos.lq_rw_sem);
+ up_read(&m->lod_qos.lq_rw_sem);
if (stripe_idx) {
lo->ldo_stripenr = stripe_idx;
out:
if (pool != NULL) {
- cfs_up_read(&pool_tgt_rw_sem(pool));
+ up_read(&pool_tgt_rw_sem(pool));
/* put back ref got by lod_find_pool() */
lod_pool_putref(pool);
}
pool = lod_find_pool(m, lo->ldo_pool);
if (pool != NULL) {
- cfs_down_read(&pool_tgt_rw_sem(pool));
+ down_read(&pool_tgt_rw_sem(pool));
osts = &(pool->pool_obds);
} else {
osts = &(m->lod_pool_info);
rc = -EFBIG;
out:
if (pool != NULL) {
- cfs_up_read(&pool_tgt_rw_sem(pool));
+ up_read(&pool_tgt_rw_sem(pool));
/* put back ref got by lod_find_pool() */
lod_pool_putref(pool);
}
pool = lod_find_pool(m, lo->ldo_pool);
if (pool != NULL) {
- cfs_down_read(&pool_tgt_rw_sem(pool));
+ down_read(&pool_tgt_rw_sem(pool));
osts = &(pool->pool_obds);
} else {
osts = &(m->lod_pool_info);
GOTO(out_nolock, rc = -EAGAIN);
/* Do actual allocation, use write lock here. */
- cfs_down_write(&m->lod_qos.lq_rw_sem);
+ down_write(&m->lod_qos.lq_rw_sem);
/*
* Check again, while we were sleeping on @lq_rw_sem things could
}
out:
- cfs_up_write(&m->lod_qos.lq_rw_sem);
+ up_write(&m->lod_qos.lq_rw_sem);
out_nolock:
if (pool != NULL) {
- cfs_up_read(&pool_tgt_rw_sem(pool));
+ up_read(&pool_tgt_rw_sem(pool));
/* put back ref got by lod_find_pool() */
lod_pool_putref(pool);
}
if (*pos >= lod->lod_ost_bitmap->size)
return NULL;
- *pos = cfs_find_next_bit(lod->lod_ost_bitmap->data,
+ *pos = find_next_bit(lod->lod_ost_bitmap->data,
lod->lod_ost_bitmap->size, *pos);
if (*pos < lod->lod_ost_bitmap->size)
return OST_TGT(lod,*pos);
if (*pos >= lod->lod_ost_bitmap->size - 1)
return NULL;
- *pos = cfs_find_next_bit(lod->lod_ost_bitmap->data,
+ *pos = find_next_bit(lod->lod_ost_bitmap->data,
lod->lod_ost_bitmap->size, *pos + 1);
if (*pos < lod->lod_ost_bitmap->size)
return OST_TGT(lod,*pos);
* Serializes access to lov_device::ld_emrg in low-memory
* conditions.
*/
- cfs_mutex_t ld_mutex;
+ struct mutex ld_mutex;
};
/**
*
* \see lov_object::lo_type
*/
- cfs_rw_semaphore_t lo_type_guard;
+ struct rw_semaphore lo_type_guard;
/**
* Type of an object. Protected by lov_object::lo_type_guard.
*/
/**
* protect lo_sub
*/
- cfs_spinlock_t lo_sub_lock;
+ spinlock_t lo_sub_lock;
/**
* Cached object attribute, built from sub-object
* attributes.
cfs_mem_cache_t *lov_lock_link_kmem;
/** Lock class of lov_device::ld_mutex. */
-cfs_lock_class_key_t cl_lov_device_mutex_class;
+struct lock_class_key cl_lov_device_mutex_class;
struct lu_kmem_descr lov_caches[] = {
{
OBD_ALLOC(newd, tgt_size * sz);
if (newd != NULL) {
- cfs_mutex_lock(&dev->ld_mutex);
+ mutex_lock(&dev->ld_mutex);
if (sub_size > 0) {
memcpy(newd, dev->ld_target, sub_size * sz);
OBD_FREE(dev->ld_target, sub_size * sz);
if (dev->ld_emrg != NULL)
lov_emerg_free(dev->ld_emrg, sub_size);
dev->ld_emrg = emerg;
- cfs_mutex_unlock(&dev->ld_mutex);
+ mutex_unlock(&dev->ld_mutex);
} else {
lov_emerg_free(emerg, tgt_size);
result = -ENOMEM;
d->ld_ops = &lov_lu_ops;
ld->ld_cl.cd_ops = &lov_cl_ops;
- cfs_mutex_init(&ld->ld_mutex);
- cfs_lockdep_set_class(&ld->ld_mutex, &cl_lov_device_mutex_class);
+ mutex_init(&ld->ld_mutex);
+ lockdep_set_class(&ld->ld_mutex, &cl_lov_device_mutex_class);
/* setup the LOV OBD */
obd = class_name2obd(lustre_cfg_string(cfg, 0));
return;
}
- cfs_spin_lock(&imp->imp_lock);
- if (imp->imp_state == LUSTRE_IMP_FULL &&
- (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES) &&
- imp->imp_connect_data.ocd_maxbytes > 0) {
- if (*stripe_maxbytes > imp->imp_connect_data.ocd_maxbytes)
- *stripe_maxbytes = imp->imp_connect_data.ocd_maxbytes;
- } else {
- *stripe_maxbytes = LUSTRE_STRIPE_MAXBYTES;
- }
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state == LUSTRE_IMP_FULL &&
+ (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES) &&
+ imp->imp_connect_data.ocd_maxbytes > 0) {
+ if (*stripe_maxbytes > imp->imp_connect_data.ocd_maxbytes)
+ *stripe_maxbytes = imp->imp_connect_data.ocd_maxbytes;
+ } else {
+ *stripe_maxbytes = LUSTRE_STRIPE_MAXBYTES;
+ }
+ spin_unlock(&imp->imp_lock);
}
static int lsm_lmm_verify_v1(struct lov_mds_md_v1 *lmm, int lmm_bytes,
struct lov_lock_handles *set_lockh;
cfs_list_t set_list;
cfs_waitq_t set_waitq;
- cfs_spinlock_t set_lock;
+ spinlock_t set_lock;
};
extern cfs_mem_cache_t *lov_oinfo_slab;
sub->sub_borrowed = 0;
if (lio->lis_mem_frozen) {
- LASSERT(cfs_mutex_is_locked(&ld->ld_mutex));
+ LASSERT(mutex_is_locked(&ld->ld_mutex));
sub->sub_io = &ld->ld_emrg[stripe]->emrg_subio;
sub->sub_env = ld->ld_emrg[stripe]->emrg_env;
sub->sub_borrowed = 1;
* In order to not make things worse, even don't try to
* allocate the memory with __GFP_NOWARN. -jay
*/
- cfs_mutex_lock(&ld->ld_mutex);
+ mutex_lock(&ld->ld_mutex);
lio->lis_mem_frozen = 1;
}
lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
}
lio->lis_mem_frozen = 0;
- cfs_mutex_unlock(&ld->ld_mutex);
+ mutex_unlock(&ld->ld_mutex);
}
RETURN(rc);
struct lov_obd *lov = &obd->u.lov;
/* nobody gets through here until lov_putref is done */
- cfs_mutex_lock(&lov->lov_lock);
+ mutex_lock(&lov->lov_lock);
cfs_atomic_inc(&lov->lov_refcount);
- cfs_mutex_unlock(&lov->lov_lock);
+ mutex_unlock(&lov->lov_lock);
return;
}
{
struct lov_obd *lov = &obd->u.lov;
- cfs_mutex_lock(&lov->lov_lock);
+ mutex_lock(&lov->lov_lock);
/* ok to dec to 0 more than once -- ltd_exp's will be null */
if (cfs_atomic_dec_and_test(&lov->lov_refcount) && lov->lov_death_row) {
CFS_LIST_HEAD(kill);
lov->lov_tgts[i] = NULL;
lov->lov_death_row--;
}
- cfs_mutex_unlock(&lov->lov_lock);
+ mutex_unlock(&lov->lov_lock);
cfs_list_for_each_entry_safe(tgt, n, &kill, ltd_kill) {
cfs_list_del(&tgt->ltd_kill);
__lov_del_obd(obd, tgt);
}
} else {
- cfs_mutex_unlock(&lov->lov_lock);
+ mutex_unlock(&lov->lov_lock);
}
}
if (tgt_obd == NULL)
RETURN(-EINVAL);
- cfs_mutex_lock(&lov->lov_lock);
+ mutex_lock(&lov->lov_lock);
if ((index < lov->lov_tgt_size) && (lov->lov_tgts[index] != NULL)) {
tgt = lov->lov_tgts[index];
CERROR("UUID %s already assigned at LOV target index %d\n",
obd_uuid2str(&tgt->ltd_uuid), index);
- cfs_mutex_unlock(&lov->lov_lock);
+ mutex_unlock(&lov->lov_lock);
RETURN(-EEXIST);
}
newsize = newsize << 1;
OBD_ALLOC(newtgts, sizeof(*newtgts) * newsize);
if (newtgts == NULL) {
- cfs_mutex_unlock(&lov->lov_lock);
+ mutex_unlock(&lov->lov_lock);
RETURN(-ENOMEM);
}
OBD_ALLOC_PTR(tgt);
if (!tgt) {
- cfs_mutex_unlock(&lov->lov_lock);
+ mutex_unlock(&lov->lov_lock);
RETURN(-ENOMEM);
}
rc = lov_ost_pool_add(&lov->lov_packed, index, lov->lov_tgt_size);
if (rc) {
- cfs_mutex_unlock(&lov->lov_lock);
+ mutex_unlock(&lov->lov_lock);
OBD_FREE_PTR(tgt);
RETURN(rc);
}
if (index >= lov->desc.ld_tgt_count)
lov->desc.ld_tgt_count = index + 1;
- cfs_mutex_unlock(&lov->lov_lock);
+ mutex_unlock(&lov->lov_lock);
CDEBUG(D_CONFIG, "idx=%d ltd_gen=%d ld_tgt_count=%d\n",
index, tgt->ltd_gen, lov->desc.ld_tgt_count);
lov->desc = *desc;
lov->lov_tgt_size = 0;
- cfs_mutex_init(&lov->lov_lock);
+ mutex_init(&lov->lov_lock);
cfs_atomic_set(&lov->lov_refcount, 0);
lov->lov_sp_me = LUSTRE_SP_CLI;
void lov_stripe_lock(struct lov_stripe_md *md)
{
- LASSERT(md->lsm_lock_owner != cfs_curproc_pid());
- cfs_spin_lock(&md->lsm_lock);
- LASSERT(md->lsm_lock_owner == 0);
- md->lsm_lock_owner = cfs_curproc_pid();
+ LASSERT(md->lsm_lock_owner != cfs_curproc_pid());
+ spin_lock(&md->lsm_lock);
+ LASSERT(md->lsm_lock_owner == 0);
+ md->lsm_lock_owner = cfs_curproc_pid();
}
EXPORT_SYMBOL(lov_stripe_lock);
void lov_stripe_unlock(struct lov_stripe_md *md)
{
- LASSERT(md->lsm_lock_owner == cfs_curproc_pid());
- md->lsm_lock_owner = 0;
- cfs_spin_unlock(&md->lsm_lock);
+ LASSERT(md->lsm_lock_owner == cfs_curproc_pid());
+ md->lsm_lock_owner = 0;
+ spin_unlock(&md->lsm_lock);
}
EXPORT_SYMBOL(lov_stripe_unlock);
if (r0->lo_sub != NULL) {
result = 0;
subconf->coc_inode = conf->coc_inode;
- cfs_spin_lock_init(&r0->lo_sub_lock);
+ spin_lock_init(&r0->lo_sub_lock);
/*
* Create stripe cl_objects.
*/
/* this wait-queue is signaled at the end of
* lu_object_free(). */
cfs_set_current_state(CFS_TASK_UNINT);
- cfs_spin_lock(&r0->lo_sub_lock);
- if (r0->lo_sub[idx] == los) {
- cfs_spin_unlock(&r0->lo_sub_lock);
- cfs_waitq_wait(waiter, CFS_TASK_UNINT);
- } else {
- cfs_spin_unlock(&r0->lo_sub_lock);
+ spin_lock(&r0->lo_sub_lock);
+ if (r0->lo_sub[idx] == los) {
+ spin_unlock(&r0->lo_sub_lock);
+ cfs_waitq_wait(waiter, CFS_TASK_UNINT);
+ } else {
+ spin_unlock(&r0->lo_sub_lock);
cfs_set_current_state(CFS_TASK_RUNNING);
break;
}
static inline void lov_conf_freeze(struct lov_object *lov)
{
if (lov->lo_owner != cfs_current())
- cfs_down_read(&lov->lo_type_guard);
+ down_read(&lov->lo_type_guard);
}
static inline void lov_conf_thaw(struct lov_object *lov)
{
if (lov->lo_owner != cfs_current())
- cfs_up_read(&lov->lo_type_guard);
+ up_read(&lov->lo_type_guard);
}
#define LOV_2DISPATCH_MAYLOCK(obj, op, lock, ...) \
static void lov_conf_lock(struct lov_object *lov)
{
LASSERT(lov->lo_owner != cfs_current());
- cfs_down_write(&lov->lo_type_guard);
+ down_write(&lov->lo_type_guard);
LASSERT(lov->lo_owner == NULL);
lov->lo_owner = cfs_current();
}
static void lov_conf_unlock(struct lov_object *lov)
{
lov->lo_owner = NULL;
- cfs_up_write(&lov->lo_type_guard);
+ up_write(&lov->lo_type_guard);
}
static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
int result;
ENTRY;
- cfs_init_rwsem(&lov->lo_type_guard);
+ init_rwsem(&lov->lo_type_guard);
cfs_waitq_init(&lov->lo_waitq);
/* no locking is necessary, as object is being created */
}
cfs_atomic_set(&(*lsmp)->lsm_refc, 1);
- cfs_spin_lock_init(&(*lsmp)->lsm_lock);
+ spin_lock_init(&(*lsmp)->lsm_lock);
(*lsmp)->lsm_magic = magic;
(*lsmp)->lsm_stripe_count = stripe_count;
(*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES * stripe_count;
/* iterate to find a non empty entry */
prev_idx = iter->idx;
- cfs_down_read(&pool_tgt_rw_sem(iter->pool));
+ down_read(&pool_tgt_rw_sem(iter->pool));
iter->idx++;
if (iter->idx == pool_tgt_count(iter->pool)) {
iter->idx = prev_idx; /* we stay on the last entry */
- cfs_up_read(&pool_tgt_rw_sem(iter->pool));
+ up_read(&pool_tgt_rw_sem(iter->pool));
return NULL;
}
- cfs_up_read(&pool_tgt_rw_sem(iter->pool));
+ up_read(&pool_tgt_rw_sem(iter->pool));
(*pos)++;
/* return != NULL to continue */
return iter;
LASSERT(iter->pool != NULL);
LASSERT(iter->idx <= pool_tgt_count(iter->pool));
- cfs_down_read(&pool_tgt_rw_sem(iter->pool));
+ down_read(&pool_tgt_rw_sem(iter->pool));
tgt = pool_tgt(iter->pool, iter->idx);
- cfs_up_read(&pool_tgt_rw_sem(iter->pool));
+ up_read(&pool_tgt_rw_sem(iter->pool));
if (tgt)
seq_printf(s, "%s\n", obd_uuid2str(&(tgt->ltd_uuid)));
CDEBUG(level, "pool "LOV_POOLNAMEF" has %d members\n",
pool->pool_name, pool->pool_obds.op_count);
- cfs_down_read(&pool_tgt_rw_sem(pool));
+ down_read(&pool_tgt_rw_sem(pool));
for (i = 0; i < pool_tgt_count(pool) ; i++) {
if (!pool_tgt(pool, i) || !(pool_tgt(pool, i))->ltd_exp)
obd_uuid2str(&((pool_tgt(pool, i))->ltd_uuid)));
}
- cfs_up_read(&pool_tgt_rw_sem(pool));
+ up_read(&pool_tgt_rw_sem(pool));
lov_pool_putref(pool);
}
count = LOV_POOL_INIT_COUNT;
op->op_array = NULL;
op->op_count = 0;
- cfs_init_rwsem(&op->op_rw_sem);
+ init_rwsem(&op->op_rw_sem);
op->op_size = count;
OBD_ALLOC(op->op_array, op->op_size * sizeof(op->op_array[0]));
if (op->op_array == NULL) {
int rc = 0, i;
ENTRY;
- cfs_down_write(&op->op_rw_sem);
+ down_write(&op->op_rw_sem);
rc = lov_ost_pool_extend(op, min_count);
if (rc)
op->op_count++;
EXIT;
out:
- cfs_up_write(&op->op_rw_sem);
+ up_write(&op->op_rw_sem);
return rc;
}
int i;
ENTRY;
- cfs_down_write(&op->op_rw_sem);
+ down_write(&op->op_rw_sem);
for (i = 0; i < op->op_count; i++) {
if (op->op_array[i] == idx) {
memmove(&op->op_array[i], &op->op_array[i + 1],
(op->op_count - i - 1) * sizeof(op->op_array[0]));
op->op_count--;
- cfs_up_write(&op->op_rw_sem);
+ up_write(&op->op_rw_sem);
EXIT;
return 0;
}
}
- cfs_up_write(&op->op_rw_sem);
+ up_write(&op->op_rw_sem);
RETURN(-EINVAL);
}
if (op->op_size == 0)
RETURN(0);
- cfs_down_write(&op->op_rw_sem);
+ down_write(&op->op_rw_sem);
OBD_FREE(op->op_array, op->op_size * sizeof(op->op_array[0]));
op->op_array = NULL;
op->op_count = 0;
op->op_size = 0;
- cfs_up_write(&op->op_rw_sem);
+ up_write(&op->op_rw_sem);
RETURN(0);
}
CDEBUG(D_INFO, "pool %p - proc %p\n", new_pool, new_pool->pool_proc_entry);
#endif
- cfs_spin_lock(&obd->obd_dev_lock);
- cfs_list_add_tail(&new_pool->pool_list, &lov->lov_pool_list);
- lov->lov_pool_count++;
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_dev_lock);
+ cfs_list_add_tail(&new_pool->pool_list, &lov->lov_pool_list);
+ lov->lov_pool_count++;
+ spin_unlock(&obd->obd_dev_lock);
/* add to find only when it fully ready */
rc = cfs_hash_add_unique(lov->lov_pools_hash_body, poolname,
RETURN(0);
out_err:
- cfs_spin_lock(&obd->obd_dev_lock);
- cfs_list_del_init(&new_pool->pool_list);
- lov->lov_pool_count--;
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_dev_lock);
+ cfs_list_del_init(&new_pool->pool_list);
+ lov->lov_pool_count--;
+ spin_unlock(&obd->obd_dev_lock);
lprocfs_remove(&new_pool->pool_proc_entry);
lov_pool_putref(pool);
}
- cfs_spin_lock(&obd->obd_dev_lock);
- cfs_list_del_init(&pool->pool_list);
- lov->lov_pool_count--;
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_dev_lock);
+ cfs_list_del_init(&pool->pool_list);
+ lov->lov_pool_count--;
+ spin_unlock(&obd->obd_dev_lock);
- /* release last reference */
- lov_pool_putref(pool);
+ /* release last reference */
+ lov_pool_putref(pool);
- RETURN(0);
+ RETURN(0);
}
*/
lov_pool_getref(pool);
- cfs_down_read(&pool_tgt_rw_sem(pool));
+ down_read(&pool_tgt_rw_sem(pool));
for (i = 0; i < pool_tgt_count(pool); i++) {
if (pool_tgt_array(pool)[i] == idx)
rc = -ENOENT;
EXIT;
out:
- cfs_up_read(&pool_tgt_rw_sem(pool));
+ up_read(&pool_tgt_rw_sem(pool));
lov_pool_putref(pool);
return rc;
CFS_INIT_LIST_HEAD(&set->set_list);
cfs_atomic_set(&set->set_refcount, 1);
cfs_waitq_init(&set->set_waitq);
- cfs_spin_lock_init(&set->set_lock);
+ spin_lock_init(&set->set_lock);
}
void lov_finish_set(struct lov_request_set *set)
if (osfs->os_ffree != LOV_U64_MAX)
lov_do_div64(osfs->os_ffree, expected_stripes);
- cfs_spin_lock(&obd->obd_osfs_lock);
- memcpy(&obd->obd_osfs, osfs, sizeof(*osfs));
- obd->obd_osfs_age = cfs_time_current_64();
- cfs_spin_unlock(&obd->obd_osfs_lock);
- RETURN(0);
- }
+ spin_lock(&obd->obd_osfs_lock);
+ memcpy(&obd->obd_osfs, osfs, sizeof(*osfs));
+ obd->obd_osfs_age = cfs_time_current_64();
+ spin_unlock(&obd->obd_osfs_lock);
+ RETURN(0);
+ }
- RETURN(-EIO);
+ RETURN(-EIO);
}
int lov_fini_statfs_set(struct lov_request_set *set)
GOTO(out_update, rc);
tgtobd = class_exp2obd(tgt->ltd_exp);
- cfs_spin_lock(&tgtobd->obd_osfs_lock);
- memcpy(&tgtobd->obd_osfs, lov_sfs, sizeof(*lov_sfs));
- if ((oinfo->oi_flags & OBD_STATFS_FROM_CACHE) == 0)
- tgtobd->obd_osfs_age = cfs_time_current_64();
- cfs_spin_unlock(&tgtobd->obd_osfs_lock);
+ spin_lock(&tgtobd->obd_osfs_lock);
+ memcpy(&tgtobd->obd_osfs, lov_sfs, sizeof(*lov_sfs));
+ if ((oinfo->oi_flags & OBD_STATFS_FROM_CACHE) == 0)
+ tgtobd->obd_osfs_age = cfs_time_current_64();
+ spin_unlock(&tgtobd->obd_osfs_lock);
out_update:
lov_update_statfs(osfs, lov_sfs, success);
if (lov) {
LASSERT(lov->lo_type == LLT_RAID0);
LASSERT(lov->u.raid0.lo_sub[los->lso_index] == los);
- cfs_spin_lock(&lov->u.raid0.lo_sub_lock);
- lov->u.raid0.lo_sub[los->lso_index] = NULL;
- cfs_spin_unlock(&lov->u.raid0.lo_sub_lock);
+ spin_lock(&lov->u.raid0.lo_sub_lock);
+ lov->u.raid0.lo_sub[los->lso_index] = NULL;
+ spin_unlock(&lov->u.raid0.lo_sub_lock);
}
lu_object_fini(obj);
int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
int pages, unsigned long *blocks,
int *created, int create,
- cfs_mutex_t *optional_mutex)
+ struct mutex *optional_mutex)
{
int rc;
return rc;
}
if (optional_mutex != NULL)
- cfs_mutex_lock(optional_mutex);
+ mutex_lock(optional_mutex);
rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks,
created, create);
if (optional_mutex != NULL)
- cfs_mutex_unlock(optional_mutex);
+ mutex_unlock(optional_mutex);
return rc;
}
/* prevent reading after eof */
spin_lock(&inode->i_lock);
- if (i_size_read(inode) < *offs + size) {
- size = i_size_read(inode) - *offs;
+ if (i_size_read(inode) < *offs + size) {
+ size = i_size_read(inode) - *offs;
spin_unlock(&inode->i_lock);
if (size < 0) {
CDEBUG(D_EXT2, "size %llu is too short for read @%llu\n",
/* correct in-core and on-disk sizes */
if (new_size > i_size_read(inode)) {
spin_lock(&inode->i_lock);
- if (new_size > i_size_read(inode))
- i_size_write(inode, new_size);
- if (i_size_read(inode) > EXT3_I(inode)->i_disksize)
- EXT3_I(inode)->i_disksize = i_size_read(inode);
- if (i_size_read(inode) > old_size) {
+ if (new_size > i_size_read(inode))
+ i_size_write(inode, new_size);
+ if (i_size_read(inode) > EXT3_I(inode)->i_disksize)
+ EXT3_I(inode)->i_disksize = i_size_read(inode);
+ if (i_size_read(inode) > old_size) {
spin_unlock(&inode->i_lock);
- mark_inode_dirty(inode);
- } else {
+ mark_inode_dirty(inode);
+ } else {
spin_unlock(&inode->i_lock);
}
}
rc = 0;
if (unlikely(stats->ls_biggest_alloc_num <= idx)) {
if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
- cfs_spin_lock_irqsave(&stats->ls_lock, flags);
+ spin_lock_irqsave(&stats->ls_lock, flags);
else
- cfs_spin_lock(&stats->ls_lock);
+ spin_lock(&stats->ls_lock);
if (stats->ls_biggest_alloc_num <= idx)
stats->ls_biggest_alloc_num = idx + 1;
if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
- cfs_spin_unlock_irqrestore(&stats->ls_lock,
- flags);
+ spin_unlock_irqrestore(&stats->ls_lock, flags);
} else {
- cfs_spin_unlock(&stats->ls_lock);
+ spin_unlock(&stats->ls_lock);
}
}
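The lprocfs stats hunk above picks between spin_lock_irqsave() and plain spin_lock() depending on LPROCFS_STATS_FLAG_IRQ_SAFE; only the cfs_ wrappers are dropped. The irqsave variant needs a local 'unsigned long flags' to stash the interrupt state, as in this minimal standalone sketch (counter_lock and counter_bump are illustrative names):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(counter_lock);
static unsigned long counter;

/* Safe to call from both process and interrupt context. */
static void counter_bump(void)
{
	unsigned long flags;

	spin_lock_irqsave(&counter_lock, flags);
	counter++;
	spin_unlock_irqrestore(&counter_lock, flags);
}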
void obd_update_maxusage()
{
- __u64 max1, max2;
+ __u64 max1, max2;
- max1 = obd_pages_sum();
- max2 = obd_memory_sum();
+ max1 = obd_pages_sum();
+ max2 = obd_memory_sum();
- cfs_spin_lock(&obd_updatemax_lock);
- if (max1 > obd_max_pages)
- obd_max_pages = max1;
- if (max2 > obd_max_alloc)
- obd_max_alloc = max2;
- cfs_spin_unlock(&obd_updatemax_lock);
+ spin_lock(&obd_updatemax_lock);
+ if (max1 > obd_max_pages)
+ obd_max_pages = max1;
+ if (max2 > obd_max_alloc)
+ obd_max_alloc = max2;
+ spin_unlock(&obd_updatemax_lock);
}
EXPORT_SYMBOL(obd_update_maxusage);
__u64 obd_memory_max(void)
{
- __u64 ret;
+ __u64 ret;
- cfs_spin_lock(&obd_updatemax_lock);
- ret = obd_max_alloc;
- cfs_spin_unlock(&obd_updatemax_lock);
+ spin_lock(&obd_updatemax_lock);
+ ret = obd_max_alloc;
+ spin_unlock(&obd_updatemax_lock);
- return ret;
+ return ret;
}
EXPORT_SYMBOL(obd_memory_max);
__u64 obd_pages_max(void)
{
- __u64 ret;
+ __u64 ret;
- cfs_spin_lock(&obd_updatemax_lock);
- ret = obd_max_pages;
- cfs_spin_unlock(&obd_updatemax_lock);
+ spin_lock(&obd_updatemax_lock);
+ ret = obd_max_pages;
+ spin_unlock(&obd_updatemax_lock);
- return ret;
+ return ret;
}
EXPORT_SYMBOL(obd_pages_max);
static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc)
{
- /* Don't hold error requests for replay. */
- if (req->rq_replay) {
- cfs_spin_lock(&req->rq_lock);
- req->rq_replay = 0;
- cfs_spin_unlock(&req->rq_lock);
+ /* Don't hold error requests for replay. */
+ if (req->rq_replay) {
+ spin_lock(&req->rq_lock);
+ req->rq_replay = 0;
+ spin_unlock(&req->rq_lock);
}
if (rc && req->rq_transno != 0) {
DEBUG_REQ(D_ERROR, req, "transno returned on error rc %d", rc);
return NULL;
}
- cfs_spin_lock(&req->rq_lock);
- req->rq_replay = req->rq_import->imp_replayable;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_replay = req->rq_import->imp_replayable;
+ spin_unlock(&req->rq_lock);
/* pack the intent */
lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
* be put along with freeing \var mod.
*/
ptlrpc_request_addref(req);
- cfs_spin_lock(&req->rq_lock);
- req->rq_committed = 1;
- cfs_spin_unlock(&req->rq_lock);
- req->rq_cb_data = NULL;
- obd_mod_put(mod);
+ spin_lock(&req->rq_lock);
+ req->rq_committed = 1;
+ spin_unlock(&req->rq_lock);
+ req->rq_cb_data = NULL;
+ obd_mod_put(mod);
}
int mdc_set_open_replay_data(struct obd_export *exp,
obd_mod_get(mod);
obd_mod_get(mod);
- cfs_spin_lock(&open_req->rq_lock);
- och->och_mod = mod;
- mod->mod_och = och;
- mod->mod_open_req = open_req;
- open_req->rq_cb_data = mod;
- open_req->rq_commit_cb = mdc_commit_open;
- cfs_spin_unlock(&open_req->rq_lock);
+ spin_lock(&open_req->rq_lock);
+ och->och_mod = mod;
+ mod->mod_och = och;
+ mod->mod_open_req = open_req;
+ open_req->rq_cb_data = mod;
+ open_req->rq_commit_cb = mdc_commit_open;
+ spin_unlock(&open_req->rq_lock);
}
rec->cr_fid2 = body->fid1;
DEBUG_REQ(D_HA, mod->mod_open_req, "matched open");
/* We no longer want to preserve this open for replay even
* though the open was committed. b=3632, b=3633 */
- cfs_spin_lock(&mod->mod_open_req->rq_lock);
- mod->mod_open_req->rq_replay = 0;
- cfs_spin_unlock(&mod->mod_open_req->rq_lock);
+ spin_lock(&mod->mod_open_req->rq_lock);
+ mod->mod_open_req->rq_replay = 0;
+ spin_unlock(&mod->mod_open_req->rq_lock);
} else {
CDEBUG(D_HA, "couldn't find open req; expecting close error\n");
}
DEBUG_REQ(D_HA, mod->mod_open_req, "matched setattr");
/* We no longer want to preserve this setattr for replay even
* though the open was committed. b=3632, b=3633 */
- cfs_spin_lock(&mod->mod_open_req->rq_lock);
- mod->mod_open_req->rq_replay = 0;
- cfs_spin_unlock(&mod->mod_open_req->rq_lock);
+ spin_lock(&mod->mod_open_req->rq_lock);
+ mod->mod_open_req->rq_replay = 0;
+ spin_unlock(&mod->mod_open_req->rq_lock);
}
mdc_close_pack(req, op_data);
* Since the request might also come from lprocfs, so we need
* sync this with client_disconnect_export Bug15684
*/
- cfs_down_read(&obd->u.cli.cl_sem);
+ down_read(&obd->u.cli.cl_sem);
if (obd->u.cli.cl_import)
imp = class_import_get(obd->u.cli.cl_import);
- cfs_up_read(&obd->u.cli.cl_sem);
+ up_read(&obd->u.cli.cl_sem);
if (!imp)
RETURN(-ENODEV);
if (vallen != sizeof(int))
RETURN(-EINVAL);
- cfs_spin_lock(&imp->imp_lock);
- if (*((int *)val)) {
- imp->imp_connect_flags_orig |= OBD_CONNECT_RDONLY;
- imp->imp_connect_data.ocd_connect_flags |= OBD_CONNECT_RDONLY;
- } else {
- imp->imp_connect_flags_orig &= ~OBD_CONNECT_RDONLY;
- imp->imp_connect_data.ocd_connect_flags &= ~OBD_CONNECT_RDONLY;
- }
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ if (*((int *)val)) {
+ imp->imp_connect_flags_orig |= OBD_CONNECT_RDONLY;
+ imp->imp_connect_data.ocd_connect_flags |=
+ OBD_CONNECT_RDONLY;
+ } else {
+ imp->imp_connect_flags_orig &= ~OBD_CONNECT_RDONLY;
+ imp->imp_connect_data.ocd_connect_flags &=
+ ~OBD_CONNECT_RDONLY;
+ }
+ spin_unlock(&imp->imp_lock);
rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
keylen, key, vallen, val, set);
}
if (KEY_IS(KEY_MDS_CONN)) {
/* mds-mds import */
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_server_timeout = 1;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ imp->imp_server_timeout = 1;
+ spin_unlock(&imp->imp_lock);
imp->imp_client->cli_request_portal = MDS_MDS_PORTAL;
CDEBUG(D_OTHER, "%s: timeout / 2\n", exp->exp_obd->obd_name);
RETURN(0);
struct obd_connect_data *data,
void *localdata)
{
- struct obd_import *imp = obd->u.cli.cl_import;
+ struct obd_import *imp = obd->u.cli.cl_import;
- /* mds-mds import features */
- if (data && (data->ocd_connect_flags & OBD_CONNECT_MDS_MDS)) {
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_server_timeout = 1;
- cfs_spin_unlock(&imp->imp_lock);
+ /* mds-mds import features */
+ if (data && (data->ocd_connect_flags & OBD_CONNECT_MDS_MDS)) {
+ spin_lock(&imp->imp_lock);
+ imp->imp_server_timeout = 1;
+ spin_unlock(&imp->imp_lock);
imp->imp_client->cli_request_portal = MDS_MDS_PORTAL;
CDEBUG(D_OTHER, "%s: Set 'mds' portal and timeout\n",
obd->obd_name);
" in log "LPX64"\n", hdr->lrh_index, rec->cur_hdr.lrh_index,
rec->cur_id, rec->cur_endrec, llh->lgh_id.lgl_oid);
- cfs_spin_lock(&mdd->mdd_cl.mc_user_lock);
- mdd->mdd_cl.mc_lastuser = rec->cur_id;
- cfs_spin_unlock(&mdd->mdd_cl.mc_user_lock);
+ spin_lock(&mdd->mdd_cl.mc_user_lock);
+ mdd->mdd_cl.mc_lastuser = rec->cur_id;
+ spin_unlock(&mdd->mdd_cl.mc_user_lock);
- return LLOG_PROC_BREAK;
+ return LLOG_PROC_BREAK;
}
static int llog_changelog_cancel_cb(const struct lu_env *env,
int rc;
mdd->mdd_cl.mc_index = 0;
- cfs_spin_lock_init(&mdd->mdd_cl.mc_lock);
+ spin_lock_init(&mdd->mdd_cl.mc_lock);
mdd->mdd_cl.mc_starttime = cfs_time_current_64();
mdd->mdd_cl.mc_flags = 0; /* off by default */
mdd->mdd_cl.mc_mask = CHANGELOG_DEFMASK;
- cfs_spin_lock_init(&mdd->mdd_cl.mc_user_lock);
+ spin_lock_init(&mdd->mdd_cl.mc_user_lock);
mdd->mdd_cl.mc_lastuser = 0;
rc = mdd_changelog_llog_init(env, mdd);
mdd2obd_dev(mdd)->obd_name);
rc = -ESRCH;
} else {
- cfs_spin_lock(&mdd->mdd_cl.mc_lock);
- mdd->mdd_cl.mc_flags |= CLM_ON;
- cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
+ spin_lock(&mdd->mdd_cl.mc_lock);
+ mdd->mdd_cl.mc_flags |= CLM_ON;
+ spin_unlock(&mdd->mdd_cl.mc_lock);
rc = mdd_changelog_write_header(env, mdd, CLM_START);
- }
- } else if ((on == 0) && ((mdd->mdd_cl.mc_flags & CLM_ON) == CLM_ON)) {
- LCONSOLE_INFO("%s: changelog off\n",mdd2obd_dev(mdd)->obd_name);
+ }
+ } else if ((on == 0) && ((mdd->mdd_cl.mc_flags & CLM_ON) == CLM_ON)) {
+ LCONSOLE_INFO("%s: changelog off\n", mdd2obd_dev(mdd)->obd_name);
rc = mdd_changelog_write_header(env, mdd, CLM_FINI);
- cfs_spin_lock(&mdd->mdd_cl.mc_lock);
- mdd->mdd_cl.mc_flags &= ~CLM_ON;
- cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
- }
- return rc;
+ spin_lock(&mdd->mdd_cl.mc_lock);
+ mdd->mdd_cl.mc_flags &= ~CLM_ON;
+ spin_unlock(&mdd->mdd_cl.mc_lock);
+ }
+ return rc;
}
/** Remove entries with indicies up to and including \a endrec from the
if (ctxt == NULL)
return -ENXIO;
- cfs_spin_lock(&mdd->mdd_cl.mc_lock);
- cur = (long long)mdd->mdd_cl.mc_index;
- cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
+ spin_lock(&mdd->mdd_cl.mc_lock);
+ cur = (long long)mdd->mdd_cl.mc_index;
+ spin_unlock(&mdd->mdd_cl.mc_lock);
if (endrec > cur)
endrec = cur;
rec->cr_hdr.lrh_len = llog_data_len(sizeof(*rec) + rec->cr.cr_namelen);
rec->cr_hdr.lrh_type = CHANGELOG_REC;
rec->cr.cr_time = cl_time();
- cfs_spin_lock(&mdd->mdd_cl.mc_lock);
+ spin_lock(&mdd->mdd_cl.mc_lock);
rec->cr.cr_index = ++mdd->mdd_cl.mc_index;
- cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
+ spin_unlock(&mdd->mdd_cl.mc_lock);
ctxt = llog_get_context(obd, LLOG_CHANGELOG_ORIG_CTXT);
LASSERT(ctxt);
rec->cur_hdr.lrh_len = sizeof(*rec);
rec->cur_hdr.lrh_type = CHANGELOG_USER_REC;
- cfs_spin_lock(&mdd->mdd_cl.mc_user_lock);
- if (mdd->mdd_cl.mc_lastuser == (unsigned int)(-1)) {
- cfs_spin_unlock(&mdd->mdd_cl.mc_user_lock);
- CERROR("Maximum number of changelog users exceeded!\n");
- GOTO(out, rc = -EOVERFLOW);
- }
- *id = rec->cur_id = ++mdd->mdd_cl.mc_lastuser;
- rec->cur_endrec = mdd->mdd_cl.mc_index;
- cfs_spin_unlock(&mdd->mdd_cl.mc_user_lock);
+ spin_lock(&mdd->mdd_cl.mc_user_lock);
+ if (mdd->mdd_cl.mc_lastuser == (unsigned int)(-1)) {
+ spin_unlock(&mdd->mdd_cl.mc_user_lock);
+ CERROR("Maximum number of changelog users exceeded!\n");
+ GOTO(out, rc = -EOVERFLOW);
+ }
+ *id = rec->cur_id = ++mdd->mdd_cl.mc_lastuser;
+ rec->cur_endrec = mdd->mdd_cl.mc_index;
+ spin_unlock(&mdd->mdd_cl.mc_user_lock);
rc = llog_cat_add(env, ctxt->loc_handle, &rec->cur_hdr, NULL, NULL);
data.mcud_minrec = 0;
data.mcud_usercount = 0;
data.mcud_endrec = endrec;
- cfs_spin_lock(&mdd->mdd_cl.mc_lock);
- endrec = mdd->mdd_cl.mc_index;
- cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
+ spin_lock(&mdd->mdd_cl.mc_lock);
+ endrec = mdd->mdd_cl.mc_index;
+ spin_unlock(&mdd->mdd_cl.mc_lock);
if ((data.mcud_endrec == 0) ||
((data.mcud_endrec > endrec) &&
(data.mcud_endrec != MCUD_UNREGISTER)))
rec->cr_hdr.lrh_type = CHANGELOG_REC;
rec->cr.cr_time = cl_time();
- cfs_spin_lock(&mdd->mdd_cl.mc_lock);
+ spin_lock(&mdd->mdd_cl.mc_lock);
/* NB: I suppose it's possible llog_add adds out of order wrt cr_index,
* but as long as the MDD transactions are ordered correctly for e.g.
* rename conflicts, I don't think this should matter. */
rec->cr.cr_index = ++mdd->mdd_cl.mc_index;
- cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
+ spin_unlock(&mdd->mdd_cl.mc_lock);
ctxt = llog_get_context(obd, LLOG_CHANGELOG_ORIG_CTXT);
if (ctxt == NULL)
rec->cr_hdr.lrh_type = CHANGELOG_REC;
rec->cr.cr_time = cl_time();
- cfs_spin_lock(&mdd->mdd_cl.mc_lock);
+ spin_lock(&mdd->mdd_cl.mc_lock);
/* NB: I suppose it's possible llog_add adds out of order wrt cr_index,
* but as long as the MDD transactions are ordered correctly for e.g.
* rename conflicts, I don't think this should matter. */
rec->cr.cr_index = ++mdd->mdd_cl.mc_index;
- cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
+ spin_unlock(&mdd->mdd_cl.mc_lock);
ctxt = llog_get_context(obd, LLOG_CHANGELOG_ORIG_CTXT);
if (ctxt == NULL)
out_free:
/* The child object shouldn't be cached anymore */
if (rc)
- cfs_set_bit(LU_OBJECT_HEARD_BANSHEE,
+ set_bit(LU_OBJECT_HEARD_BANSHEE,
&child->mo_lu.lo_header->loh_flags);
return rc;
}
#define CLM_PURGE 0x40000
struct mdd_changelog {
- cfs_spinlock_t mc_lock; /* for index */
- int mc_flags;
- int mc_mask;
- __u64 mc_index;
- __u64 mc_starttime;
- cfs_spinlock_t mc_user_lock;
- int mc_lastuser;
+ spinlock_t mc_lock; /* for index */
+ int mc_flags;
+ int mc_mask;
+ __u64 mc_index;
+ __u64 mc_starttime;
+ spinlock_t mc_user_lock;
+ int mc_lastuser;
};
static inline __u64 cl_time(void) {
extern const char lfsck_bookmark_name[];
struct md_lfsck {
- cfs_mutex_t ml_mutex;
- cfs_spinlock_t ml_lock;
+ struct mutex ml_mutex;
+ spinlock_t ml_lock;
struct ptlrpc_thread ml_thread;
struct dt_object *ml_bookmark_obj;
struct dt_object *ml_it_obj;
void mdd_lfsck_set_speed(struct md_lfsck *lfsck, __u32 limit)
{
- cfs_spin_lock(&lfsck->ml_lock);
+ spin_lock(&lfsck->ml_lock);
lfsck->ml_speed_limit = limit;
if (limit != LFSCK_SPEED_NO_LIMIT) {
if (limit > CFS_HZ) {
lfsck->ml_sleep_jif = 0;
lfsck->ml_sleep_rate = 0;
}
- cfs_spin_unlock(&lfsck->ml_lock);
+ spin_unlock(&lfsck->ml_lock);
}
static void mdd_lfsck_control_speed(struct md_lfsck *lfsck)
if (lfsck->ml_sleep_jif > 0 &&
lfsck->ml_new_scanned >= lfsck->ml_sleep_rate) {
- cfs_spin_lock(&lfsck->ml_lock);
+ spin_lock(&lfsck->ml_lock);
if (likely(lfsck->ml_sleep_jif > 0 &&
lfsck->ml_new_scanned >= lfsck->ml_sleep_rate)) {
lwi = LWI_TIMEOUT_INTR(lfsck->ml_sleep_jif, NULL,
LWI_ON_SIGNAL_NOOP, NULL);
- cfs_spin_unlock(&lfsck->ml_lock);
+ spin_unlock(&lfsck->ml_lock);
l_wait_event(thread->t_ctl_waitq,
!thread_is_running(thread),
&lwi);
lfsck->ml_new_scanned = 0;
} else {
- cfs_spin_unlock(&lfsck->ml_lock);
+ spin_unlock(&lfsck->ml_lock);
}
}
}
* every bookmark, then low layer module can decide the
* start point for current iteration. */
- cfs_spin_lock(&lfsck->ml_lock);
+ spin_lock(&lfsck->ml_lock);
thread_set_flags(thread, SVC_RUNNING);
- cfs_spin_unlock(&lfsck->ml_lock);
+ spin_unlock(&lfsck->ml_lock);
cfs_waitq_broadcast(&thread->t_ctl_waitq);
/* Call iops->load() to finish the choosing start point. */
lu_env_fini(&env);
noenv:
- cfs_spin_lock(&lfsck->ml_lock);
+ spin_lock(&lfsck->ml_lock);
thread_set_flags(thread, SVC_STOPPED);
cfs_waitq_broadcast(&thread->t_ctl_waitq);
- cfs_spin_unlock(&lfsck->ml_lock);
+ spin_unlock(&lfsck->ml_lock);
return rc;
}
if (lfsck->ml_it_obj == NULL)
RETURN(-ENOTSUPP);
- cfs_mutex_lock(&lfsck->ml_mutex);
- cfs_spin_lock(&lfsck->ml_lock);
+ mutex_lock(&lfsck->ml_mutex);
+ spin_lock(&lfsck->ml_lock);
if (thread_is_running(thread)) {
- cfs_spin_unlock(&lfsck->ml_lock);
- cfs_mutex_unlock(&lfsck->ml_mutex);
+ spin_unlock(&lfsck->ml_lock);
+ mutex_unlock(&lfsck->ml_mutex);
RETURN(-EALREADY);
}
- cfs_spin_unlock(&lfsck->ml_lock);
+ spin_unlock(&lfsck->ml_lock);
if (start->ls_valid & LSV_SPEED_LIMIT)
mdd_lfsck_set_speed(lfsck, start->ls_speed_limit);
thread_is_running(thread) ||
thread_is_stopped(thread),
&lwi);
- cfs_mutex_unlock(&lfsck->ml_mutex);
+ mutex_unlock(&lfsck->ml_mutex);
RETURN(rc < 0 ? rc : 0);
}
struct l_wait_info lwi = { 0 };
ENTRY;
- cfs_mutex_lock(&lfsck->ml_mutex);
- cfs_spin_lock(&lfsck->ml_lock);
+ mutex_lock(&lfsck->ml_mutex);
+ spin_lock(&lfsck->ml_lock);
if (thread_is_init(thread) || thread_is_stopped(thread)) {
- cfs_spin_unlock(&lfsck->ml_lock);
- cfs_mutex_unlock(&lfsck->ml_mutex);
+ spin_unlock(&lfsck->ml_lock);
+ mutex_unlock(&lfsck->ml_mutex);
RETURN(-EALREADY);
}
thread_set_flags(thread, SVC_STOPPING);
- cfs_spin_unlock(&lfsck->ml_lock);
+ spin_unlock(&lfsck->ml_lock);
cfs_waitq_broadcast(&thread->t_ctl_waitq);
l_wait_event(thread->t_ctl_waitq,
thread_is_stopped(thread),
&lwi);
- cfs_mutex_unlock(&lfsck->ml_mutex);
+ mutex_unlock(&lfsck->ml_mutex);
RETURN(0);
}
memset(lfsck, 0, sizeof(*lfsck));
lfsck->ml_version = LFSCK_VERSION_V1;
cfs_waitq_init(&lfsck->ml_thread.t_ctl_waitq);
- cfs_mutex_init(&lfsck->ml_mutex);
- cfs_spin_lock_init(&lfsck->ml_lock);
+ mutex_init(&lfsck->ml_mutex);
+ spin_lock_init(&lfsck->ml_lock);
obj = dt_store_open(env, mdd->mdd_child, "", lfsck_bookmark_name,
&mdd_env_info(env)->mti_fid);
#else /* !MDD_DISABLE_PDO_LOCK */
#ifdef CONFIG_LOCKDEP
-static cfs_lock_class_key_t mdd_pdirop_key;
+static struct lock_class_key mdd_pdirop_key;
#define RETIP ((unsigned long)__builtin_return_address(0))
return rc;
}
- cfs_spin_lock(&mdd->mdd_cl.mc_lock);
- cur = mdd->mdd_cl.mc_index;
- cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
+ spin_lock(&mdd->mdd_cl.mc_lock);
+ cur = mdd->mdd_cl.mc_index;
+ spin_unlock(&mdd->mdd_cl.mc_lock);
cucb.count = count;
cucb.page = page;
/* Verify that our path hasn't changed since we started the lookup.
Record the current index, and verify the path resolves to the
same fid. If it does, then the path is correct as of this index. */
- cfs_spin_lock(&mdd->mdd_cl.mc_lock);
- pli->pli_currec = mdd->mdd_cl.mc_index;
- cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
+ spin_lock(&mdd->mdd_cl.mc_lock);
+ pli->pli_currec = mdd->mdd_cl.mc_index;
+ spin_unlock(&mdd->mdd_cl.mc_lock);
rc = mdd_path2fid(env, mdd, ptr, &pli->pli_fid);
if (rc) {
CDEBUG(D_INFO, "mdd_path2fid(%s) failed %d\n", ptr, rc);
next = mdt->mdt_child;
rc = next->md_ops->mdo_update_capa_key(&env, next, tmp);
if (!rc) {
- cfs_spin_lock(&capa_lock);
- *bkey = *rkey;
- *rkey = *tmp;
- cfs_spin_unlock(&capa_lock);
-
- rc = write_capa_keys(&env, mdt, mdt->mdt_capa_keys);
- if (rc) {
- cfs_spin_lock(&capa_lock);
- *rkey = *bkey;
- memset(bkey, 0, sizeof(*bkey));
- cfs_spin_unlock(&capa_lock);
+ spin_lock(&capa_lock);
+ *bkey = *rkey;
+ *rkey = *tmp;
+ spin_unlock(&capa_lock);
+
+ rc = write_capa_keys(&env, mdt, mdt->mdt_capa_keys);
+ if (rc) {
+ spin_lock(&capa_lock);
+ *rkey = *bkey;
+ memset(bkey, 0, sizeof(*bkey));
+ spin_unlock(&capa_lock);
} else {
set_capa_key_expiry(mdt);
DEBUG_CAPA_KEY(D_SEC, rkey, "new");
rc = next->md_ops->mdo_statfs(info->mti_env, next, osfs);
if (rc)
RETURN(rc);
- cfs_spin_lock(&info->mti_mdt->mdt_osfs_lock);
+ spin_lock(&info->mti_mdt->mdt_osfs_lock);
info->mti_mdt->mdt_osfs = *osfs;
info->mti_mdt->mdt_osfs_age = cfs_time_current_64();
- cfs_spin_unlock(&info->mti_mdt->mdt_osfs_lock);
+ spin_unlock(&info->mti_mdt->mdt_osfs_lock);
} else {
/** use cached statfs data */
- cfs_spin_lock(&info->mti_mdt->mdt_osfs_lock);
+ spin_lock(&info->mti_mdt->mdt_osfs_lock);
*osfs = info->mti_mdt->mdt_osfs;
- cfs_spin_unlock(&info->mti_mdt->mdt_osfs_lock);
+ spin_unlock(&info->mti_mdt->mdt_osfs_lock);
}
- if (rc == 0)
+ if (rc == 0)
mdt_counter_incr(req, LPROC_MDT_STATFS);
- RETURN(rc);
+ RETURN(rc);
}
/**
req->rq_status = 0;
lustre_msg_set_status(req->rq_repmsg, 0);
- cfs_spin_lock(&req->rq_export->exp_lock);
- if (*(__u32 *)val)
- req->rq_export->exp_connect_flags |= OBD_CONNECT_RDONLY;
- else
- req->rq_export->exp_connect_flags &=~OBD_CONNECT_RDONLY;
- cfs_spin_unlock(&req->rq_export->exp_lock);
+ spin_lock(&req->rq_export->exp_lock);
+ if (*(__u32 *)val)
+ req->rq_export->exp_connect_flags |= OBD_CONNECT_RDONLY;
+ else
+ req->rq_export->exp_connect_flags &= ~OBD_CONNECT_RDONLY;
+ spin_unlock(&req->rq_export->exp_lock);
} else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
struct changelog_setinfo *cs =
* the connect flags in the shared export data structure. LU-1623 */
reply = req_capsule_server_get(info->mti_pill, &RMF_CONNECT_DATA);
exp = req->rq_export;
- cfs_spin_lock(&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
exp->exp_connect_flags = reply->ocd_connect_flags;
- cfs_spin_unlock(&exp->exp_lock);
+ spin_unlock(&exp->exp_lock);
rc = mdt_init_idmap(info);
if (rc != 0)
sptlrpc_target_update_exp_flavor(obd, &tmp_rset);
- cfs_write_lock(&m->mdt_sptlrpc_lock);
+ write_lock(&m->mdt_sptlrpc_lock);
sptlrpc_rule_set_free(&m->mdt_sptlrpc_rset);
m->mdt_sptlrpc_rset = tmp_rset;
- cfs_write_unlock(&m->mdt_sptlrpc_lock);
+ write_unlock(&m->mdt_sptlrpc_lock);
return 0;
}
obd->u.obt.obt_magic = OBT_MAGIC;
}
- cfs_rwlock_init(&m->mdt_sptlrpc_lock);
+ rwlock_init(&m->mdt_sptlrpc_lock);
sptlrpc_rule_set_init(&m->mdt_sptlrpc_rset);
- cfs_spin_lock_init(&m->mdt_ioepoch_lock);
+ spin_lock_init(&m->mdt_ioepoch_lock);
m->mdt_opts.mo_compat_resname = 0;
m->mdt_opts.mo_mds_capa = 1;
m->mdt_opts.mo_oss_capa = 1;
CFS_INIT_LIST_HEAD(&m->mdt_nosquash_nids);
m->mdt_nosquash_str = NULL;
m->mdt_nosquash_strlen = 0;
- cfs_init_rwsem(&m->mdt_squash_sem);
- cfs_spin_lock_init(&m->mdt_osfs_lock);
+ init_rwsem(&m->mdt_squash_sem);
+ spin_lock_init(&m->mdt_osfs_lock);
m->mdt_osfs_age = cfs_time_shift_64(-1000);
m->mdt_md_dev.md_lu_dev.ld_ops = &mdt_lu_ops;
lu_object_init(o, h, d);
lu_object_add_top(h, o);
o->lo_ops = &mdt_obj_ops;
- cfs_mutex_init(&mo->mot_ioepoch_mutex);
- cfs_mutex_init(&mo->mot_lov_mutex);
+ mutex_init(&mo->mot_ioepoch_mutex);
+ mutex_init(&mo->mot_lov_mutex);
RETURN(o);
} else
RETURN(NULL);
if (rc)
RETURN(rc);
- LASSERT(!cfs_test_bit(MDT_FL_CFGLOG, &mdt->mdt_state));
+ LASSERT(!test_bit(MDT_FL_CFGLOG, &mdt->mdt_state));
target_recovery_init(&mdt->mdt_lut, mdt_recovery_handle);
- cfs_set_bit(MDT_FL_CFGLOG, &mdt->mdt_state);
+ set_bit(MDT_FL_CFGLOG, &mdt->mdt_state);
LASSERT(obd->obd_no_conn);
- cfs_spin_lock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_dev_lock);
obd->obd_no_conn = 0;
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_unlock(&obd->obd_dev_lock);
if (obd->obd_recovering == 0)
mdt_postrecov(env, mdt);
* connection, and it is safe to expose this flag before connection
* processing completes. */
if (data->ocd_connect_flags & OBD_CONNECT_LIGHTWEIGHT) {
- cfs_spin_lock(&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
exp->exp_connect_flags |= OBD_CONNECT_LIGHTWEIGHT;
- cfs_spin_unlock(&exp->exp_lock);
+ spin_unlock(&exp->exp_lock);
}
data->ocd_version = LUSTRE_VERSION_CODE;
}
static int mdt_connect_check_sptlrpc(struct mdt_device *mdt,
- struct obd_export *exp,
- struct ptlrpc_request *req)
+ struct obd_export *exp,
+ struct ptlrpc_request *req)
{
- struct sptlrpc_flavor flvr;
- int rc = 0;
+ struct sptlrpc_flavor flvr;
+ int rc = 0;
- if (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) {
- cfs_read_lock(&mdt->mdt_sptlrpc_lock);
- sptlrpc_target_choose_flavor(&mdt->mdt_sptlrpc_rset,
- req->rq_sp_from,
- req->rq_peer.nid,
- &flvr);
- cfs_read_unlock(&mdt->mdt_sptlrpc_lock);
+ if (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) {
+ read_lock(&mdt->mdt_sptlrpc_lock);
+ sptlrpc_target_choose_flavor(&mdt->mdt_sptlrpc_rset,
+ req->rq_sp_from,
+ req->rq_peer.nid,
+ &flvr);
+ read_unlock(&mdt->mdt_sptlrpc_lock);
- cfs_spin_lock(&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
exp->exp_sp_peer = req->rq_sp_from;
exp->exp_flvr = flvr;
rc = -EACCES;
}
- cfs_spin_unlock(&exp->exp_lock);
+ spin_unlock(&exp->exp_lock);
} else {
if (exp->exp_sp_peer != req->rq_sp_from) {
CERROR("RPC source %s doesn't match %s\n",
* XXX: probably not very appropriate method is used now
* at some point we should find a better one
*/
- if (!cfs_test_bit(MDT_FL_SYNCED, &mdt->mdt_state)) {
+ if (!test_bit(MDT_FL_SYNCED, &mdt->mdt_state)) {
rc = obd_health_check(env, mdt->mdt_child_exp->exp_obd);
if (rc)
RETURN(-EAGAIN);
- cfs_set_bit(MDT_FL_SYNCED, &mdt->mdt_state);
+ set_bit(MDT_FL_SYNCED, &mdt->mdt_state);
}
rc = class_connect(&conn, obd, cluuid);
int rc = 0;
ENTRY;
- cfs_spin_lock(&med->med_open_lock);
- while (!cfs_list_empty(&med->med_open_head)) {
- cfs_list_t *tmp = med->med_open_head.next;
- mfd = cfs_list_entry(tmp, struct mdt_file_data, mfd_list);
+ spin_lock(&med->med_open_lock);
+ while (!cfs_list_empty(&med->med_open_head)) {
+ cfs_list_t *tmp = med->med_open_head.next;
+ mfd = cfs_list_entry(tmp, struct mdt_file_data, mfd_list);
- /* Remove mfd handle so it can't be found again.
- * We are consuming the mfd_list reference here. */
- class_handle_unhash(&mfd->mfd_handle);
- cfs_list_move_tail(&mfd->mfd_list, &closing_list);
- }
- cfs_spin_unlock(&med->med_open_lock);
+ /* Remove mfd handle so it can't be found again.
+ * We are consuming the mfd_list reference here. */
+ class_handle_unhash(&mfd->mfd_handle);
+ cfs_list_move_tail(&mfd->mfd_list, &closing_list);
+ }
+ spin_unlock(&med->med_open_lock);
mdt = mdt_dev(obd->obd_lu_dev);
LASSERT(mdt != NULL);
ENTRY;
CFS_INIT_LIST_HEAD(&med->med_open_head);
- cfs_spin_lock_init(&med->med_open_lock);
- cfs_mutex_init(&med->med_idmap_mutex);
- med->med_idmap = NULL;
- cfs_spin_lock(&exp->exp_lock);
- exp->exp_connecting = 1;
- cfs_spin_unlock(&exp->exp_lock);
+ spin_lock_init(&med->med_open_lock);
+ mutex_init(&med->med_idmap_mutex);
+ med->med_idmap = NULL;
+ spin_lock(&exp->exp_lock);
+ exp->exp_connecting = 1;
+ spin_unlock(&exp->exp_lock);
/* self-export doesn't need client data and ldlm initialization */
if (unlikely(obd_uuid_equals(&exp->exp_obd->obd_uuid,
/* There is race condition:
* "uc_upcall" was changed just after "is_identity_get_disabled" check.
*/
- cfs_read_lock(&cache->uc_upcall_rwlock);
+ read_lock(&cache->uc_upcall_rwlock);
CDEBUG(D_INFO, "The upcall is: '%s'\n", cache->uc_upcall);
if (unlikely(!strcmp(cache->uc_upcall, "NONE"))) {
}
EXIT;
out:
- cfs_read_unlock(&cache->uc_upcall_rwlock);
+ read_unlock(&cache->uc_upcall_rwlock);
return rc;
}
ENTRY;
if (exp_connect_rmtclient(exp)) {
- cfs_mutex_lock(&med->med_idmap_mutex);
+ mutex_lock(&med->med_idmap_mutex);
if (!med->med_idmap)
med->med_idmap = lustre_idmap_init();
- cfs_mutex_unlock(&med->med_idmap_mutex);
+ mutex_unlock(&med->med_idmap_mutex);
if (IS_ERR(med->med_idmap)) {
long err = PTR_ERR(med->med_idmap);
void mdt_cleanup_idmap(struct mdt_export_data *med)
{
- cfs_mutex_lock(&med->med_idmap_mutex);
+ mutex_lock(&med->med_idmap_mutex);
if (med->med_idmap != NULL) {
lustre_idmap_fini(med->med_idmap);
med->med_idmap = NULL;
}
- cfs_mutex_unlock(&med->med_idmap_mutex);
+ mutex_unlock(&med->med_idmap_mutex);
}
static inline void mdt_revoke_export_locks(struct obd_export *exp)
/* mdt state flags */
unsigned long mdt_state;
/* lock to protect IOepoch */
- cfs_spinlock_t mdt_ioepoch_lock;
+ spinlock_t mdt_ioepoch_lock;
__u64 mdt_ioepoch;
/* transaction callbacks */
struct upcall_cache *mdt_identity_cache;
/* sptlrpc rules */
- cfs_rwlock_t mdt_sptlrpc_lock;
+ rwlock_t mdt_sptlrpc_lock;
struct sptlrpc_rule_set mdt_sptlrpc_rset;
/* capability keys */
mdt_som_conf:1;
/* statfs optimization: we cache a bit */
- struct obd_statfs mdt_osfs;
- __u64 mdt_osfs_age;
- cfs_spinlock_t mdt_osfs_lock;
+ struct obd_statfs mdt_osfs;
+ __u64 mdt_osfs_age;
+ spinlock_t mdt_osfs_lock;
/* root squash */
uid_t mdt_squash_uid;
cfs_list_t mdt_nosquash_nids;
char *mdt_nosquash_str;
int mdt_nosquash_strlen;
- cfs_rw_semaphore_t mdt_squash_sem;
+ struct rw_semaphore mdt_squash_sem;
cfs_proc_dir_entry_t *mdt_proc_entry;
struct lprocfs_stats *mdt_stats;
int mot_ioepoch_count;
int mot_writecount;
/* Lock to protect object's IO epoch. */
- cfs_mutex_t mot_ioepoch_mutex;
+ struct mutex mot_ioepoch_mutex;
/* Lock to protect create_data */
- cfs_mutex_t mot_lov_mutex;
+ struct mutex mot_lov_mutex;
};
enum mdt_object_flags {
}
}
-static int match_nosquash_list(cfs_rw_semaphore_t *sem,
- cfs_list_t *nidlist,
- lnet_nid_t peernid)
+static int match_nosquash_list(struct rw_semaphore *sem,
+ cfs_list_t *nidlist,
+ lnet_nid_t peernid)
{
- int rc;
- ENTRY;
- cfs_down_read(sem);
- rc = cfs_match_nid(peernid, nidlist);
- cfs_up_read(sem);
- RETURN(rc);
+ int rc;
+ ENTRY;
+ down_read(sem);
+ rc = cfs_match_nid(peernid, nidlist);
+ up_read(sem);
+ RETURN(rc);
}
/* root_squash for inter-MDS operations */
static int lproc_mdt_attach_rename_seqstat(struct mdt_device *mdt)
{
- struct lu_device *ld = &mdt->mdt_md_dev.md_lu_dev;
- struct obd_device *obd = ld->ld_obd;
- int i;
+ struct lu_device *ld = &mdt->mdt_md_dev.md_lu_dev;
+ struct obd_device *obd = ld->ld_obd;
+ int i;
- for (i = 0; i < RENAME_LAST; i++)
- spin_lock_init(&mdt->mdt_rename_stats.hist[i].oh_lock);
+ for (i = 0; i < RENAME_LAST; i++)
+ spin_lock_init(&mdt->mdt_rename_stats.hist[i].oh_lock);
- return lprocfs_obd_seq_create(obd, "rename_stats", 0444,
- &mdt_rename_stats_fops, mdt);
+ return lprocfs_obd_seq_create(obd, "rename_stats", 0444,
+ &mdt_rename_stats_fops, mdt);
}
void mdt_rename_counter_tally(struct mdt_thread_info *info,
struct upcall_cache *hash = mdt->mdt_identity_cache;
int len;
- *eof = 1;
- cfs_read_lock(&hash->uc_upcall_rwlock);
- len = snprintf(page, count, "%s\n", hash->uc_upcall);
- cfs_read_unlock(&hash->uc_upcall_rwlock);
- return len;
+ *eof = 1;
+ read_lock(&hash->uc_upcall_rwlock);
+ len = snprintf(page, count, "%s\n", hash->uc_upcall);
+ read_unlock(&hash->uc_upcall_rwlock);
+ return len;
}
static int lprocfs_wr_identity_upcall(struct file *file, const char *buffer,
GOTO(failed, rc = -EFAULT);
/* Remove any extraneous bits from the upcall (e.g. linefeeds) */
- cfs_write_lock(&hash->uc_upcall_rwlock);
- sscanf(kernbuf, "%s", hash->uc_upcall);
- cfs_write_unlock(&hash->uc_upcall_rwlock);
+ write_lock(&hash->uc_upcall_rwlock);
+ sscanf(kernbuf, "%s", hash->uc_upcall);
+ write_unlock(&hash->uc_upcall_rwlock);
if (strcmp(hash->uc_name, obd->obd_name) != 0)
CWARN("%s: write to upcall name %s\n",
if (!strcmp(kernbuf, "NONE") || !strcmp(kernbuf, "clear")) {
/* empty string is special case */
- cfs_down_write(&mdt->mdt_squash_sem);
+ down_write(&mdt->mdt_squash_sem);
if (!cfs_list_empty(&mdt->mdt_nosquash_nids)) {
cfs_free_nidlist(&mdt->mdt_nosquash_nids);
OBD_FREE(mdt->mdt_nosquash_str,
mdt->mdt_nosquash_str = NULL;
mdt->mdt_nosquash_strlen = 0;
}
- cfs_up_write(&mdt->mdt_squash_sem);
+ up_write(&mdt->mdt_squash_sem);
LCONSOLE_INFO("%s: nosquash_nids is cleared\n",
obd->obd_name);
OBD_FREE(kernbuf, count + 1);
GOTO(failed, rc = -EINVAL);
}
- cfs_down_write(&mdt->mdt_squash_sem);
+ down_write(&mdt->mdt_squash_sem);
if (!cfs_list_empty(&mdt->mdt_nosquash_nids)) {
cfs_free_nidlist(&mdt->mdt_nosquash_nids);
OBD_FREE(mdt->mdt_nosquash_str, mdt->mdt_nosquash_strlen);
LCONSOLE_INFO("%s: nosquash_nids is set to %s\n",
obd->obd_name, kernbuf);
- cfs_up_write(&mdt->mdt_squash_sem);
+ up_write(&mdt->mdt_squash_sem);
RETURN(count);
failed:
ma->ma_need = MA_INODE | MA_LOV;
ma->ma_valid = 0;
- cfs_mutex_lock(&o->mot_lov_mutex);
+ mutex_lock(&o->mot_lov_mutex);
if (!(o->mot_flags & MOF_LOV_CREATED)) {
rc = mdo_create_data(info->mti_env,
p ? mdt_object_child(p) : NULL,
if (rc == 0 && ma->ma_valid & MA_LOV)
o->mot_flags |= MOF_LOV_CREATED;
}
- cfs_mutex_unlock(&o->mot_lov_mutex);
+ mutex_unlock(&o->mot_lov_mutex);
RETURN(rc);
}
!S_ISREG(lu_object_attr(&o->mot_obj.mo_lu)))
RETURN(0);
- cfs_mutex_lock(&o->mot_ioepoch_mutex);
- if (mdt_ioepoch_opened(o)) {
- /* Epoch continues even if there is no writers yet. */
- CDEBUG(D_INODE, "continue epoch "LPU64" for "DFID"\n",
- o->mot_ioepoch, PFID(mdt_object_fid(o)));
- } else {
- /* XXX: ->mdt_ioepoch is not initialized at the mount */
- cfs_spin_lock(&mdt->mdt_ioepoch_lock);
+ mutex_lock(&o->mot_ioepoch_mutex);
+ if (mdt_ioepoch_opened(o)) {
+ /* Epoch continues even if there is no writers yet. */
+ CDEBUG(D_INODE, "continue epoch "LPU64" for "DFID"\n",
+ o->mot_ioepoch, PFID(mdt_object_fid(o)));
+ } else {
+ /* XXX: ->mdt_ioepoch is not initialized at the mount */
+ spin_lock(&mdt->mdt_ioepoch_lock);
if (mdt->mdt_ioepoch < info->mti_replayepoch)
mdt->mdt_ioepoch = info->mti_replayepoch;
else
o->mot_ioepoch = mdt->mdt_ioepoch;
- cfs_spin_unlock(&mdt->mdt_ioepoch_lock);
+ spin_unlock(&mdt->mdt_ioepoch_lock);
- CDEBUG(D_INODE, "starting epoch "LPU64" for "DFID"\n",
- o->mot_ioepoch, PFID(mdt_object_fid(o)));
- if (created)
- o->mot_flags |= MOF_SOM_CREATED;
- cancel = 1;
- }
- o->mot_ioepoch_count++;
- cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+ CDEBUG(D_INODE, "starting epoch "LPU64" for "DFID"\n",
+ o->mot_ioepoch, PFID(mdt_object_fid(o)));
+ if (created)
+ o->mot_flags |= MOF_SOM_CREATED;
+ cancel = 1;
+ }
+ o->mot_ioepoch_count++;
+ mutex_unlock(&o->mot_ioepoch_mutex);
/* Cancel Size-on-MDS attributes cached on clients for the open case.
* In the truncate case, see mdt_reint_setattr(). */
{
int rc = 0;
- cfs_mutex_lock(&o->mot_ioepoch_mutex);
+ mutex_lock(&o->mot_ioepoch_mutex);
CDEBUG(D_INODE, "Eviction. Closing IOepoch "LPU64" on "DFID". "
"Count %d\n", o->mot_ioepoch, PFID(mdt_object_fid(o)),
o->mot_ioepoch_count);
rc = mdt_som_attr_set(info, o, o->mot_ioepoch, MDT_SOM_DISABLE);
mdt_object_som_enable(o, o->mot_ioepoch);
}
- cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+ mutex_unlock(&o->mot_ioepoch_mutex);
RETURN(rc);
}
int rc = MDT_IOEPOCH_CLOSED;
ENTRY;
- cfs_mutex_lock(&o->mot_ioepoch_mutex);
+ mutex_lock(&o->mot_ioepoch_mutex);
CDEBUG(D_INODE, "Replay. Closing epoch "LPU64" on "DFID". Count %d\n",
o->mot_ioepoch, PFID(mdt_object_fid(o)), o->mot_ioepoch_count);
o->mot_ioepoch_count--;
if (!mdt_ioepoch_opened(o))
mdt_object_som_enable(o, info->mti_ioepoch->ioepoch);
- cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+ mutex_unlock(&o->mot_ioepoch_mutex);
RETURN(rc);
}
la = &info->mti_attr.ma_attr;
achange = (info->mti_ioepoch->flags & MF_SOM_CHANGE);
- cfs_mutex_lock(&o->mot_ioepoch_mutex);
+ mutex_lock(&o->mot_ioepoch_mutex);
o->mot_ioepoch_count--;
tmp_ma = &info->mti_u.som.attr;
mdt_object_som_enable(o, o->mot_ioepoch);
}
- cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+ mutex_unlock(&o->mot_ioepoch_mutex);
/* If recovery is needed, tell the client to perform GETATTR under
* the lock. */
if (ret == MDT_IOEPOCH_GETATTR && recovery) {
RETURN(rc ? : ret);
error_up:
- cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+ mutex_unlock(&o->mot_ioepoch_mutex);
return rc;
}
!(info->mti_attr.ma_attr.la_valid & LA_SIZE)))
act = MDT_SOM_DISABLE;
- cfs_mutex_lock(&o->mot_ioepoch_mutex);
+ mutex_lock(&o->mot_ioepoch_mutex);
/* Mark the object it is the recovery state if we failed to obtain
* SOM attributes. */
if (act == MDT_SOM_DISABLE)
rc = mdt_som_attr_set(info, o, ioepoch, act);
mdt_object_som_enable(o, ioepoch);
}
- cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+ mutex_unlock(&o->mot_ioepoch_mutex);
RETURN(rc);
}
{
int rc = 0;
ENTRY;
- cfs_mutex_lock(&o->mot_ioepoch_mutex);
+ mutex_lock(&o->mot_ioepoch_mutex);
rc = o->mot_writecount;
- cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+ mutex_unlock(&o->mot_ioepoch_mutex);
RETURN(rc);
}
{
int rc = 0;
ENTRY;
- cfs_mutex_lock(&o->mot_ioepoch_mutex);
+ mutex_lock(&o->mot_ioepoch_mutex);
if (o->mot_writecount < 0)
rc = -ETXTBSY;
else
o->mot_writecount++;
- cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+ mutex_unlock(&o->mot_ioepoch_mutex);
RETURN(rc);
}
void mdt_write_put(struct mdt_object *o)
{
ENTRY;
- cfs_mutex_lock(&o->mot_ioepoch_mutex);
+ mutex_lock(&o->mot_ioepoch_mutex);
o->mot_writecount--;
- cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+ mutex_unlock(&o->mot_ioepoch_mutex);
EXIT;
}
{
int rc = 0;
ENTRY;
- cfs_mutex_lock(&o->mot_ioepoch_mutex);
+ mutex_lock(&o->mot_ioepoch_mutex);
if (o->mot_writecount > 0)
rc = -ETXTBSY;
else
o->mot_writecount--;
- cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+ mutex_unlock(&o->mot_ioepoch_mutex);
RETURN(rc);
}
static void mdt_write_allow(struct mdt_object *o)
{
ENTRY;
- cfs_mutex_lock(&o->mot_ioepoch_mutex);
+ mutex_lock(&o->mot_ioepoch_mutex);
o->mot_writecount++;
- cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+ mutex_unlock(&o->mot_ioepoch_mutex);
EXIT;
}
if (lustre_msg_get_transno(req->rq_repmsg) != 0)
RETURN_EXIT;
- cfs_spin_lock(&mdt->mdt_lut.lut_translock);
+ spin_lock(&mdt->mdt_lut.lut_translock);
if (rc != 0) {
if (info->mti_transno != 0) {
struct obd_export *exp = req->rq_export;
RETURN_EXIT;
}
} else if (info->mti_transno == 0) {
- info->mti_transno = ++ mdt->mdt_lut.lut_last_transno;
- } else {
- /* should be replay */
- if (info->mti_transno > mdt->mdt_lut.lut_last_transno)
- mdt->mdt_lut.lut_last_transno = info->mti_transno;
+ info->mti_transno = ++mdt->mdt_lut.lut_last_transno;
+ } else {
+ /* should be replay */
+ if (info->mti_transno > mdt->mdt_lut.lut_last_transno)
+ mdt->mdt_lut.lut_last_transno = info->mti_transno;
}
- cfs_spin_unlock(&mdt->mdt_lut.lut_translock);
+ spin_unlock(&mdt->mdt_lut.lut_translock);
CDEBUG(D_INODE, "transno = "LPU64", last_committed = "LPU64"\n",
info->mti_transno,
/* update lcd in memory only for resent cases */
ted = &req->rq_export->exp_target_data;
LASSERT(ted);
- cfs_mutex_lock(&ted->ted_lcd_lock);
+ mutex_lock(&ted->ted_lcd_lock);
lcd = ted->ted_lcd;
if (info->mti_transno < lcd->lcd_last_transno &&
info->mti_transno != 0) {
* last rcvd info if replay req transno < last transno,
* otherwise the following resend(after replay) can not
* be checked correctly by xid */
- cfs_mutex_unlock(&ted->ted_lcd_lock);
+ mutex_unlock(&ted->ted_lcd_lock);
CDEBUG(D_HA, "%s: transno = "LPU64" < last_transno = "LPU64"\n",
mdt->mdt_md_dev.md_lu_dev.ld_obd->obd_name,
info->mti_transno, lcd->lcd_last_transno);
lcd->lcd_last_result = rc;
lcd->lcd_last_data = info->mti_opdata;
}
- cfs_mutex_unlock(&ted->ted_lcd_lock);
+ mutex_unlock(&ted->ted_lcd_lock);
EXIT;
}
"cookie=" LPX64"\n", mfd,
PFID(mdt_object_fid(mfd->mfd_object)),
info->mti_rr.rr_handle->cookie);
- cfs_spin_lock(&med->med_open_lock);
- class_handle_unhash(&old_mfd->mfd_handle);
- cfs_list_del_init(&old_mfd->mfd_list);
- cfs_spin_unlock(&med->med_open_lock);
+ spin_lock(&med->med_open_lock);
+ class_handle_unhash(&old_mfd->mfd_handle);
+ cfs_list_del_init(&old_mfd->mfd_list);
+ spin_unlock(&med->med_open_lock);
/* no attr update for that close */
la->la_valid = 0;
ma->ma_valid |= MA_FLAGS;
repbody->handle.cookie = mfd->mfd_handle.h_cookie;
if (req->rq_export->exp_disconnected) {
- cfs_spin_lock(&med->med_open_lock);
- class_handle_unhash(&mfd->mfd_handle);
- cfs_list_del_init(&mfd->mfd_list);
- cfs_spin_unlock(&med->med_open_lock);
- mdt_mfd_close(info, mfd);
- } else {
- cfs_spin_lock(&med->med_open_lock);
- cfs_list_add(&mfd->mfd_list, &med->med_open_head);
- cfs_spin_unlock(&med->med_open_lock);
+ spin_lock(&med->med_open_lock);
+ class_handle_unhash(&mfd->mfd_handle);
+ cfs_list_del_init(&mfd->mfd_list);
+ spin_unlock(&med->med_open_lock);
+ mdt_mfd_close(info, mfd);
+ } else {
+ spin_lock(&med->med_open_lock);
+ cfs_list_add(&mfd->mfd_list, &med->med_open_head);
+ spin_unlock(&med->med_open_lock);
}
mdt_empty_transno(info, rc);
mfd = NULL;
if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
- cfs_spin_lock(&med->med_open_lock);
- cfs_list_for_each(t, &med->med_open_head) {
- mfd = cfs_list_entry(t, struct mdt_file_data, mfd_list);
- if (mfd->mfd_xid == req->rq_xid) {
- break;
- }
- mfd = NULL;
- }
- cfs_spin_unlock(&med->med_open_lock);
+ spin_lock(&med->med_open_lock);
+ cfs_list_for_each(t, &med->med_open_head) {
+ mfd = cfs_list_entry(t, struct mdt_file_data, mfd_list);
+ if (mfd->mfd_xid == req->rq_xid)
+ break;
+ mfd = NULL;
+ }
+ spin_unlock(&med->med_open_lock);
if (mfd != NULL) {
repbody->handle.cookie = mfd->mfd_handle.h_cookie;
LASSERT(mdt_info_req(info));
med = &mdt_info_req(info)->rq_export->exp_mdt_data;
- cfs_spin_lock(&med->med_open_lock);
- cfs_list_add(&mfd->mfd_list, &med->med_open_head);
- class_handle_hash_back(&mfd->mfd_handle);
- cfs_spin_unlock(&med->med_open_lock);
+ spin_lock(&med->med_open_lock);
+ cfs_list_add(&mfd->mfd_list, &med->med_open_head);
+ class_handle_hash_back(&mfd->mfd_handle);
+ spin_unlock(&med->med_open_lock);
if (ret == MDT_IOEPOCH_OPENED) {
ret = 0;
}
med = &req->rq_export->exp_mdt_data;
- cfs_spin_lock(&med->med_open_lock);
- mfd = mdt_handle2mfd(info, &info->mti_ioepoch->handle);
- if (mdt_mfd_closed(mfd)) {
- cfs_spin_unlock(&med->med_open_lock);
- CDEBUG(D_INODE, "no handle for file close: fid = "DFID
- ": cookie = "LPX64"\n", PFID(info->mti_rr.rr_fid1),
- info->mti_ioepoch->handle.cookie);
- /** not serious error since bug 3633 */
- rc = -ESTALE;
- } else {
- class_handle_unhash(&mfd->mfd_handle);
- cfs_list_del_init(&mfd->mfd_list);
- cfs_spin_unlock(&med->med_open_lock);
+ spin_lock(&med->med_open_lock);
+ mfd = mdt_handle2mfd(info, &info->mti_ioepoch->handle);
+ if (mdt_mfd_closed(mfd)) {
+ spin_unlock(&med->med_open_lock);
+ CDEBUG(D_INODE, "no handle for file close: fid = "DFID
+ ": cookie = "LPX64"\n", PFID(info->mti_rr.rr_fid1),
+ info->mti_ioepoch->handle.cookie);
+ /** not serious error since bug 3633 */
+ rc = -ESTALE;
+ } else {
+ class_handle_unhash(&mfd->mfd_handle);
+ cfs_list_del_init(&mfd->mfd_list);
+ spin_unlock(&med->med_open_lock);
/* Do not lose object before last unlink. */
o = mfd->mfd_object;
RETURN(lustre_msg_get_status(req->rq_repmsg));
med = &info->mti_exp->exp_mdt_data;
- cfs_spin_lock(&med->med_open_lock);
- mfd = mdt_handle2mfd(info, &info->mti_ioepoch->handle);
- if (mfd == NULL) {
- cfs_spin_unlock(&med->med_open_lock);
+ spin_lock(&med->med_open_lock);
+ mfd = mdt_handle2mfd(info, &info->mti_ioepoch->handle);
+ if (mfd == NULL) {
+ spin_unlock(&med->med_open_lock);
CDEBUG(D_INODE, "no handle for done write: fid = "DFID
": cookie = "LPX64" ioepoch = "LPU64"\n",
PFID(info->mti_rr.rr_fid1),
mfd->mfd_mode == MDS_FMODE_TRUNC);
class_handle_unhash(&mfd->mfd_handle);
cfs_list_del_init(&mfd->mfd_list);
- cfs_spin_unlock(&med->med_open_lock);
+ spin_unlock(&med->med_open_lock);
/* Set EPOCH CLOSE flag if not set by client. */
info->mti_ioepoch->flags |= MF_EPOCH_CLOSE;
LASSERTF(rc == 0, "rc = %d\n", rc);
/* VBR: set export last committed version */
exp->exp_last_committed = last_transno;
- cfs_spin_lock(&exp->exp_lock);
- exp->exp_connecting = 0;
- exp->exp_in_recovery = 0;
- cfs_spin_unlock(&exp->exp_lock);
- obd->obd_max_recoverable_clients++;
- class_export_put(exp);
-
- CDEBUG(D_OTHER, "client at idx %d has last_transno="LPU64"\n",
- cl_idx, last_transno);
- /* protect __u64 value update */
- cfs_spin_lock(&mdt->mdt_lut.lut_translock);
- mdt->mdt_lut.lut_last_transno = max(last_transno,
- mdt->mdt_lut.lut_last_transno);
- cfs_spin_unlock(&mdt->mdt_lut.lut_translock);
+ spin_lock(&exp->exp_lock);
+ exp->exp_connecting = 0;
+ exp->exp_in_recovery = 0;
+ spin_unlock(&exp->exp_lock);
+ obd->obd_max_recoverable_clients++;
+ class_export_put(exp);
+
+ CDEBUG(D_OTHER, "client at idx %d has last_transno = "LPU64"\n",
+ cl_idx, last_transno);
+ /* protect __u64 value update */
+ spin_lock(&mdt->mdt_lut.lut_translock);
+ mdt->mdt_lut.lut_last_transno = max(last_transno,
+ mdt->mdt_lut.lut_last_transno);
+ spin_unlock(&mdt->mdt_lut.lut_translock);
}
err_client:
lsd->lsd_feature_incompat |= OBD_INCOMPAT_FID;
- cfs_spin_lock(&mdt->mdt_lut.lut_translock);
- mdt->mdt_lut.lut_last_transno = lsd->lsd_last_transno;
- cfs_spin_unlock(&mdt->mdt_lut.lut_translock);
+ spin_lock(&mdt->mdt_lut.lut_translock);
+ mdt->mdt_lut.lut_last_transno = lsd->lsd_last_transno;
+ spin_unlock(&mdt->mdt_lut.lut_translock);
CDEBUG(D_INODE, "========BEGIN DUMPING LAST_RCVD========\n");
CDEBUG(D_INODE, "%s: server last_transno: "LPU64"\n",
if (rc)
GOTO(err_client, rc);
- cfs_spin_lock(&mdt->mdt_lut.lut_translock);
- /* obd_last_committed is used for compatibility
- * with other lustre recovery code */
- obd->obd_last_committed = mdt->mdt_lut.lut_last_transno;
- cfs_spin_unlock(&mdt->mdt_lut.lut_translock);
+ spin_lock(&mdt->mdt_lut.lut_translock);
+ /* obd_last_committed is used for compatibility
+ * with other lustre recovery code */
+ obd->obd_last_committed = mdt->mdt_lut.lut_last_transno;
+ spin_unlock(&mdt->mdt_lut.lut_translock);
obd->u.obt.obt_mount_count = mount_count + 1;
obd->u.obt.obt_instance = (__u32)obd->u.obt.obt_mount_count;
ted = &req->rq_export->exp_target_data;
LASSERT(ted);
- cfs_mutex_lock(&ted->ted_lcd_lock);
- lcd = ted->ted_lcd;
- /* if the export has already been disconnected, we have no last_rcvd slot,
- * update server data with latest transno then */
- if (lcd == NULL) {
- cfs_mutex_unlock(&ted->ted_lcd_lock);
+ mutex_lock(&ted->ted_lcd_lock);
+ lcd = ted->ted_lcd;
+ /* if the export has already been disconnected, we have no last_rcvd
+ * slot, update server data with latest transno then */
+ if (lcd == NULL) {
+ mutex_unlock(&ted->ted_lcd_lock);
CWARN("commit transaction for disconnected client %s: rc %d\n",
req->rq_export->exp_client_uuid.uuid, rc);
err = tgt_server_data_write(mti->mti_env, &mdt->mdt_lut, th);
lcd->lcd_last_close_transno,
mti->mti_transno, req_is_replay(req));
if (req_is_replay(req)) {
- cfs_spin_lock(&req->rq_export->exp_lock);
- req->rq_export->exp_vbr_failed = 1;
- cfs_spin_unlock(&req->rq_export->exp_lock);
- }
- cfs_mutex_unlock(&ted->ted_lcd_lock);
+ spin_lock(&req->rq_export->exp_lock);
+ req->rq_export->exp_vbr_failed = 1;
+ spin_unlock(&req->rq_export->exp_lock);
+ }
+ mutex_unlock(&ted->ted_lcd_lock);
RETURN(req_is_replay(req) ? -EOVERFLOW : 0);
}
lcd->lcd_last_close_transno = mti->mti_transno;
lcd->lcd_last_transno,
mti->mti_transno, req_is_replay(req));
if (req_is_replay(req)) {
- cfs_spin_lock(&req->rq_export->exp_lock);
- req->rq_export->exp_vbr_failed = 1;
- cfs_spin_unlock(&req->rq_export->exp_lock);
- }
- cfs_mutex_unlock(&ted->ted_lcd_lock);
+ spin_lock(&req->rq_export->exp_lock);
+ req->rq_export->exp_vbr_failed = 1;
+ spin_unlock(&req->rq_export->exp_lock);
+ }
+ mutex_unlock(&ted->ted_lcd_lock);
RETURN(req_is_replay(req) ? -EOVERFLOW : 0);
}
lcd->lcd_last_transno = mti->mti_transno;
struct lu_target *tg = &mdt->mdt_lut;
bool update = false;
- cfs_mutex_unlock(&ted->ted_lcd_lock);
+ mutex_unlock(&ted->ted_lcd_lock);
err = 0;
/* All operations performed by LW clients are synchronous and
* we store the committed transno in the last_rcvd header */
- cfs_spin_lock(&tg->lut_translock);
+ spin_lock(&tg->lut_translock);
if (mti->mti_transno > tg->lut_lsd.lsd_last_transno) {
tg->lut_lsd.lsd_last_transno = mti->mti_transno;
update = true;
}
- cfs_spin_unlock(&tg->lut_translock);
+ spin_unlock(&tg->lut_translock);
if (update)
err = tgt_server_data_write(mti->mti_env, tg, th);
} else if (off <= 0) {
CERROR("%s: client idx %d has offset %lld\n",
mdt2obd_dev(mdt)->obd_name, ted->ted_lr_idx, off);
- cfs_mutex_unlock(&ted->ted_lcd_lock);
+ mutex_unlock(&ted->ted_lcd_lock);
err = -EINVAL;
} else {
err = tgt_client_data_write(mti->mti_env, &mdt->mdt_lut, lcd,
&off, th);
- cfs_mutex_unlock(&ted->ted_lcd_lock);
- }
- RETURN(err);
+ mutex_unlock(&ted->ted_lcd_lock);
+ }
+ RETURN(err);
}
extern struct lu_context_key mdt_thread_key;
}
mti->mti_has_trans = 1;
- cfs_spin_lock(&mdt->mdt_lut.lut_translock);
+ spin_lock(&mdt->mdt_lut.lut_translock);
if (txn->th_result != 0) {
if (mti->mti_transno != 0) {
CERROR("Replay transno "LPU64" failed: rc %d\n",
if (mti->mti_transno > mdt->mdt_lut.lut_last_transno)
mdt->mdt_lut.lut_last_transno = mti->mti_transno;
}
- cfs_spin_unlock(&mdt->mdt_lut.lut_translock);
+ spin_unlock(&mdt->mdt_lut.lut_translock);
/* sometimes the reply message has not been successfully packed */
LASSERT(req != NULL && req->rq_repmsg != NULL);
int i;
/* CAVEAT EMPTOR: spinlock order */
- cfs_spin_lock(&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
cfs_list_for_each (tmp, &exp->exp_outstanding_replies) {
oldrep = cfs_list_entry(tmp, struct ptlrpc_reply_state,
rs_exp_list);
oldrep->rs_opc);
svcpt = oldrep->rs_svcpt;
- cfs_spin_lock(&svcpt->scp_rep_lock);
+ spin_lock(&svcpt->scp_rep_lock);
cfs_list_del_init (&oldrep->rs_exp_list);
oldrep->rs_nlocks = 0;
DEBUG_REQ(D_HA, req, "stole locks for");
- cfs_spin_lock(&oldrep->rs_lock);
- ptlrpc_schedule_difficult_reply (oldrep);
- cfs_spin_unlock(&oldrep->rs_lock);
-
- cfs_spin_unlock(&svcpt->scp_rep_lock);
- break;
- }
- cfs_spin_unlock(&exp->exp_lock);
+ spin_lock(&oldrep->rs_lock);
+ ptlrpc_schedule_difficult_reply(oldrep);
+ spin_unlock(&oldrep->rs_lock);
+
+ spin_unlock(&svcpt->scp_rep_lock);
+ break;
+ }
+ spin_unlock(&exp->exp_lock);
}
/**
repbody = req_capsule_server_get(mti->mti_pill, &RMF_MDT_BODY);
repbody->ioepoch = obj->mot_ioepoch;
- cfs_spin_lock(&med->med_open_lock);
- cfs_list_for_each_entry(mfd, &med->med_open_head, mfd_list) {
- if (mfd->mfd_xid == req->rq_xid)
- break;
- }
- LASSERT(&mfd->mfd_list != &med->med_open_head);
- cfs_spin_unlock(&med->med_open_lock);
- repbody->handle.cookie = mfd->mfd_handle.h_cookie;
- }
+ spin_lock(&med->med_open_lock);
+ cfs_list_for_each_entry(mfd, &med->med_open_head, mfd_list) {
+ if (mfd->mfd_xid == req->rq_xid)
+ break;
+ }
+ LASSERT(&mfd->mfd_list != &med->med_open_head);
+ spin_unlock(&med->med_open_lock);
+ repbody->handle.cookie = mfd->mfd_handle.h_cookie;
+ }
- mdt_object_put(mti->mti_env, obj);
+ mdt_object_put(mti->mti_env, obj);
}
typedef void (*mdt_reconstructor)(struct mdt_thread_info *mti,
/** Sanity check for malformed buffers */
if (pre_ver == NULL) {
CERROR("No versions in request buffer\n");
- cfs_spin_lock(&req->rq_export->exp_lock);
- req->rq_export->exp_vbr_failed = 1;
- cfs_spin_unlock(&req->rq_export->exp_lock);
- RETURN(-EOVERFLOW);
- } else if (pre_ver[idx] != version) {
- CDEBUG(D_INODE, "Version mismatch "LPX64" != "LPX64"\n",
- pre_ver[idx], version);
- cfs_spin_lock(&req->rq_export->exp_lock);
- req->rq_export->exp_vbr_failed = 1;
- cfs_spin_unlock(&req->rq_export->exp_lock);
- RETURN(-EOVERFLOW);
- }
- RETURN(0);
+ spin_lock(&req->rq_export->exp_lock);
+ req->rq_export->exp_vbr_failed = 1;
+ spin_unlock(&req->rq_export->exp_lock);
+ RETURN(-EOVERFLOW);
+ } else if (pre_ver[idx] != version) {
+ CDEBUG(D_INODE, "Version mismatch "LPX64" != "LPX64"\n",
+ pre_ver[idx], version);
+ spin_lock(&req->rq_export->exp_lock);
+ req->rq_export->exp_vbr_failed = 1;
+ spin_unlock(&req->rq_export->exp_lock);
+ RETURN(-EOVERFLOW);
+ }
+ RETURN(0);
}
/**
mfd->mfd_object = mo;
mfd->mfd_xid = req->rq_xid;
- cfs_spin_lock(&med->med_open_lock);
- cfs_list_add(&mfd->mfd_list, &med->med_open_head);
- cfs_spin_unlock(&med->med_open_lock);
+ spin_lock(&med->med_open_lock);
+ cfs_list_add(&mfd->mfd_list, &med->med_open_head);
+ spin_unlock(&med->med_open_lock);
repbody->handle.cookie = mfd->mfd_handle.h_cookie;
}
LASSERT(mdt_conn_flags(info) & OBD_CONNECT_SOM);
LASSERT(info->mti_ioepoch);
- cfs_spin_lock(&med->med_open_lock);
- mfd = mdt_handle2mfd(info, &info->mti_ioepoch->handle);
- if (mfd == NULL) {
- cfs_spin_unlock(&med->med_open_lock);
+ spin_lock(&med->med_open_lock);
+ mfd = mdt_handle2mfd(info, &info->mti_ioepoch->handle);
+ if (mfd == NULL) {
+ spin_unlock(&med->med_open_lock);
CDEBUG(D_INODE, "no handle for file close: "
"fid = "DFID": cookie = "LPX64"\n",
PFID(info->mti_rr.rr_fid1),
class_handle_unhash(&mfd->mfd_handle);
cfs_list_del_init(&mfd->mfd_list);
- cfs_spin_unlock(&med->med_open_lock);
+ spin_unlock(&med->med_open_lock);
mdt_mfd_close(info, mfd);
} else if ((ma->ma_valid & MA_INODE) && ma->ma_attr.la_valid) {
/* spinlock to make sure no item with 0 refcount in the list */
if (cfs_atomic_dec_and_lock(&cld->cld_refcount, &config_list_lock)) {
cfs_list_del(&cld->cld_list_chain);
- cfs_spin_unlock(&config_list_lock);
+ spin_unlock(&config_list_lock);
CDEBUG(D_MGC, "dropping config log %s\n", cld->cld_logname);
LASSERT(logname != NULL);
instance = cfg ? cfg->cfg_instance : NULL;
- cfs_spin_lock(&config_list_lock);
+ spin_lock(&config_list_lock);
cfs_list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
/* check if instance equals */
if (instance != cld->cld_cfg.cfg_instance)
cfs_atomic_inc(&found->cld_refcount);
LASSERT(found->cld_stopping == 0 || cld_is_sptlrpc(found) == 0);
}
- cfs_spin_unlock(&config_list_lock);
- RETURN(found);
+ spin_unlock(&config_list_lock);
+ RETURN(found);
}
static
cld->cld_cfg = *cfg;
else
cld->cld_cfg.cfg_callback = class_config_llog_handler;
- cfs_mutex_init(&cld->cld_lock);
+ mutex_init(&cld->cld_lock);
cld->cld_cfg.cfg_last_idx = 0;
cld->cld_cfg.cfg_flags = 0;
cld->cld_cfg.cfg_sb = sb;
rc = mgc_logname2resid(logname, &cld->cld_resid, type);
- cfs_spin_lock(&config_list_lock);
- cfs_list_add(&cld->cld_list_chain, &config_llog_list);
- cfs_spin_unlock(&config_list_lock);
+ spin_lock(&config_list_lock);
+ cfs_list_add(&cld->cld_list_chain, &config_llog_list);
+ spin_unlock(&config_list_lock);
if (rc) {
config_log_put(cld);
RETURN(0);
}
-CFS_DEFINE_MUTEX(llog_process_lock);
+DEFINE_MUTEX(llog_process_lock);
/** Stop watching for updates on this log.
*/
if (cld == NULL)
RETURN(-ENOENT);
- cfs_mutex_lock(&cld->cld_lock);
+ mutex_lock(&cld->cld_lock);
/*
* if cld_stopping is set, it means we didn't start the log thus
* not owning the start ref. this can happen after previous umount:
* calling start_log.
*/
if (unlikely(cld->cld_stopping)) {
- cfs_mutex_unlock(&cld->cld_lock);
+ mutex_unlock(&cld->cld_lock);
/* drop the ref from the find */
config_log_put(cld);
RETURN(rc);
cld_recover = cld->cld_recover;
cld->cld_recover = NULL;
- cfs_mutex_unlock(&cld->cld_lock);
+ mutex_unlock(&cld->cld_lock);
- if (cld_recover) {
- cfs_mutex_lock(&cld_recover->cld_lock);
- cld_recover->cld_stopping = 1;
- cfs_mutex_unlock(&cld_recover->cld_lock);
- config_log_put(cld_recover);
- }
+ if (cld_recover) {
+ mutex_lock(&cld_recover->cld_lock);
+ cld_recover->cld_stopping = 1;
+ mutex_unlock(&cld_recover->cld_lock);
+ config_log_put(cld_recover);
+ }
- cfs_spin_lock(&config_list_lock);
- cld_sptlrpc = cld->cld_sptlrpc;
- cld->cld_sptlrpc = NULL;
- cfs_spin_unlock(&config_list_lock);
+ spin_lock(&config_list_lock);
+ cld_sptlrpc = cld->cld_sptlrpc;
+ cld->cld_sptlrpc = NULL;
+ spin_unlock(&config_list_lock);
if (cld_sptlrpc)
config_log_put(cld_sptlrpc);
OCD_HAS_FLAG(ocd, IMP_RECOV) ? "ENABLED" : "DISABLED");
rc += snprintf(page + rc, count - rc, "client_state:\n");
- cfs_spin_lock(&config_list_lock);
- cfs_list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
- if (cld->cld_recover == NULL)
- continue;
- rc += snprintf(page + rc, count - rc,
- " - { client: %s, nidtbl_version: %u }\n",
- cld->cld_logname,
- cld->cld_recover->cld_cfg.cfg_last_idx);
- }
- cfs_spin_unlock(&config_list_lock);
+ spin_lock(&config_list_lock);
+ cfs_list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
+ if (cld->cld_recover == NULL)
+ continue;
+ rc += snprintf(page + rc, count - rc,
+ " - { client: %s, nidtbl_version: %u }\n",
+ cld->cld_logname,
+ cld->cld_recover->cld_cfg.cfg_last_idx);
+ }
+ spin_unlock(&config_list_lock);
- RETURN(rc);
+ RETURN(rc);
}
/* reenqueue any lost locks */
#define RQ_STOP 0x8
static int rq_state = 0;
static cfs_waitq_t rq_waitq;
-static CFS_DECLARE_COMPLETION(rq_exit);
+static DECLARE_COMPLETION(rq_exit);
static void do_requeue(struct config_llog_data *cld)
{
/* Do not run mgc_process_log on a disconnected export or an
export which is being disconnected. Take the client
semaphore to make the check non-racy. */
- cfs_down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
+ down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname);
mgc_process_log(cld->cld_mgcexp->exp_obd, cld);
CDEBUG(D_MGC, "disconnecting, won't update log %s\n",
cld->cld_logname);
}
- cfs_up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
+ up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
EXIT;
}
CDEBUG(D_MGC, "Starting requeue thread\n");
/* Keep trying failed locks periodically */
- cfs_spin_lock(&config_list_lock);
- rq_state |= RQ_RUNNING;
- while (1) {
- struct l_wait_info lwi;
- struct config_llog_data *cld, *cld_prev;
- int rand = cfs_rand() & MGC_TIMEOUT_RAND_CENTISEC;
- int stopped = !!(rq_state & RQ_STOP);
- int to;
-
- /* Any new or requeued lostlocks will change the state */
- rq_state &= ~(RQ_NOW | RQ_LATER);
- cfs_spin_unlock(&config_list_lock);
+ spin_lock(&config_list_lock);
+ rq_state |= RQ_RUNNING;
+ while (1) {
+ struct l_wait_info lwi;
+ struct config_llog_data *cld, *cld_prev;
+ int rand = cfs_rand() & MGC_TIMEOUT_RAND_CENTISEC;
+ int stopped = !!(rq_state & RQ_STOP);
+ int to;
+
+ /* Any new or requeued lostlocks will change the state */
+ rq_state &= ~(RQ_NOW | RQ_LATER);
+ spin_unlock(&config_list_lock);
/* Always wait a few seconds to allow the server who
caused the lock revocation to finish its setup, plus some
*/
cld_prev = NULL;
- cfs_spin_lock(&config_list_lock);
- cfs_list_for_each_entry(cld, &config_llog_list,
- cld_list_chain) {
- if (!cld->cld_lostlock)
- continue;
+ spin_lock(&config_list_lock);
+ cfs_list_for_each_entry(cld, &config_llog_list,
+ cld_list_chain) {
+ if (!cld->cld_lostlock)
+ continue;
- cfs_spin_unlock(&config_list_lock);
+ spin_unlock(&config_list_lock);
LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
if (likely(!stopped))
do_requeue(cld);
- cfs_spin_lock(&config_list_lock);
- }
- cfs_spin_unlock(&config_list_lock);
- if (cld_prev)
- config_log_put(cld_prev);
-
- /* break after scanning the list so that we can drop
- * refcount to losing lock clds */
- if (unlikely(stopped)) {
- cfs_spin_lock(&config_list_lock);
- break;
- }
+ spin_lock(&config_list_lock);
+ }
+ spin_unlock(&config_list_lock);
+ if (cld_prev)
+ config_log_put(cld_prev);
+
+ /* break after scanning the list so that we can drop
+ * refcount to losing lock clds */
+ if (unlikely(stopped)) {
+ spin_lock(&config_list_lock);
+ break;
+ }
- /* Wait a bit to see if anyone else needs a requeue */
- lwi = (struct l_wait_info) { 0 };
- l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP),
- &lwi);
- cfs_spin_lock(&config_list_lock);
- }
- /* spinlock and while guarantee RQ_NOW and RQ_LATER are not set */
- rq_state &= ~RQ_RUNNING;
- cfs_spin_unlock(&config_list_lock);
+ /* Wait a bit to see if anyone else needs a requeue */
+ lwi = (struct l_wait_info) { 0 };
+ l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP),
+ &lwi);
+ spin_lock(&config_list_lock);
+ }
+ /* spinlock and while guarantee RQ_NOW and RQ_LATER are not set */
+ rq_state &= ~RQ_RUNNING;
+ spin_unlock(&config_list_lock);
- cfs_complete(&rq_exit);
+ complete(&rq_exit);
- CDEBUG(D_MGC, "Ending requeue thread\n");
- RETURN(rc);
+ CDEBUG(D_MGC, "Ending requeue thread\n");
+ RETURN(rc);
}
/* Add a cld to the list to requeue. Start the requeue thread if needed.
cld->cld_stopping, rq_state);
LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
- cfs_mutex_lock(&cld->cld_lock);
- if (cld->cld_stopping || cld->cld_lostlock) {
- cfs_mutex_unlock(&cld->cld_lock);
- RETURN_EXIT;
- }
- /* this refcount will be released in mgc_requeue_thread. */
- config_log_get(cld);
- cld->cld_lostlock = 1;
- cfs_mutex_unlock(&cld->cld_lock);
-
- /* Hold lock for rq_state */
- cfs_spin_lock(&config_list_lock);
- if (rq_state & RQ_STOP) {
- cfs_spin_unlock(&config_list_lock);
- cld->cld_lostlock = 0;
- config_log_put(cld);
- } else {
- rq_state |= RQ_NOW;
- cfs_spin_unlock(&config_list_lock);
- cfs_waitq_signal(&rq_waitq);
- }
- EXIT;
+ mutex_lock(&cld->cld_lock);
+ if (cld->cld_stopping || cld->cld_lostlock) {
+ mutex_unlock(&cld->cld_lock);
+ RETURN_EXIT;
+ }
+ /* this refcount will be released in mgc_requeue_thread. */
+ config_log_get(cld);
+ cld->cld_lostlock = 1;
+ mutex_unlock(&cld->cld_lock);
+
+ /* Hold lock for rq_state */
+ spin_lock(&config_list_lock);
+ if (rq_state & RQ_STOP) {
+ spin_unlock(&config_list_lock);
+ cld->cld_lostlock = 0;
+ config_log_put(cld);
+ } else {
+ rq_state |= RQ_NOW;
+ spin_unlock(&config_list_lock);
+ cfs_waitq_signal(&rq_waitq);
+ }
+ EXIT;
}
/********************** class fns **********************/
LASSERT(lsi->lsi_srv_mnt == mnt);
/* The mgc fs exclusion sem. Only one fs can be setup at a time. */
- cfs_down(&cli->cl_mgc_sem);
+ down(&cli->cl_mgc_sem);
cfs_cleanup_group_info();
obd->obd_fsops = fsfilt_get_ops(lsi->lsi_fstype);
if (IS_ERR(obd->obd_fsops)) {
- cfs_up(&cli->cl_mgc_sem);
- CERROR("No fstype %s rc=%ld\n", lsi->lsi_fstype,
- PTR_ERR(obd->obd_fsops));
+ up(&cli->cl_mgc_sem);
+ CERROR("%s: No fstype %s: rc = %ld\n", lsi->lsi_fstype,
+ obd->obd_name, PTR_ERR(obd->obd_fsops));
RETURN(PTR_ERR(obd->obd_fsops));
}
fsfilt_put_ops(obd->obd_fsops);
obd->obd_fsops = NULL;
cli->cl_mgc_vfsmnt = NULL;
- cfs_up(&cli->cl_mgc_sem);
+ up(&cli->cl_mgc_sem);
RETURN(err);
}
if (obd->obd_fsops)
fsfilt_put_ops(obd->obd_fsops);
- cfs_up(&cli->cl_mgc_sem);
+ up(&cli->cl_mgc_sem);
RETURN(rc);
}
if (cfs_atomic_dec_and_test(&mgc_count)) {
int running;
/* stop requeue thread */
- cfs_spin_lock(&config_list_lock);
- running = rq_state & RQ_RUNNING;
- if (running)
- rq_state |= RQ_STOP;
- cfs_spin_unlock(&config_list_lock);
- if (running) {
- cfs_waitq_signal(&rq_waitq);
- cfs_wait_for_completion(&rq_exit);
+ spin_lock(&config_list_lock);
+ running = rq_state & RQ_RUNNING;
+ if (running)
+ rq_state |= RQ_STOP;
+ spin_unlock(&config_list_lock);
+ if (running) {
+ cfs_waitq_signal(&rq_waitq);
+ wait_for_completion(&rq_exit);
}
}
obd_cleanup_client_import(obd);
static void mgc_notify_active(struct obd_device *unused)
{
- /* wakeup mgc_requeue_thread to requeue mgc lock */
- cfs_spin_lock(&config_list_lock);
- rq_state |= RQ_NOW;
- cfs_spin_unlock(&config_list_lock);
- cfs_waitq_signal(&rq_waitq);
+ /* wakeup mgc_requeue_thread to requeue mgc lock */
+ spin_lock(&config_list_lock);
+ rq_state |= RQ_NOW;
+ spin_unlock(&config_list_lock);
+ cfs_waitq_signal(&rq_waitq);
- /* TODO: Help the MGS rebuild nidtbl. -jay */
+ /* TODO: Help the MGS rebuild nidtbl. -jay */
}
/* Send target_reg message to MGS */
pos += sprintf(params, "%s.import=%s", cname, "connection=");
uuid = buf + pos;
- cfs_down_read(&obd->u.cli.cl_sem);
+ down_read(&obd->u.cli.cl_sem);
if (obd->u.cli.cl_import == NULL) {
/* client does not connect to the OST yet */
- cfs_up_read(&obd->u.cli.cl_sem);
+ up_read(&obd->u.cli.cl_sem);
rc = 0;
continue;
}
rc = client_import_find_conn(obd->u.cli.cl_import,
entry->u.nids[0],
(struct obd_uuid *)uuid);
- cfs_up_read(&obd->u.cli.cl_sem);
+ up_read(&obd->u.cli.cl_sem);
if (rc < 0) {
CERROR("mgc: cannot find uuid by nid %s\n",
libcfs_nid2str(entry->u.nids[0]));
again:
LASSERT(cld_is_recover(cld));
- LASSERT(cfs_mutex_is_locked(&cld->cld_lock));
+ LASSERT(mutex_is_locked(&cld->cld_lock));
req = ptlrpc_request_alloc(class_exp2cliimp(cld->cld_mgcexp),
&RQF_MGS_CONFIG_READ);
if (req == NULL)
ENTRY;
LASSERT(cld);
- LASSERT(cfs_mutex_is_locked(&cld->cld_lock));
+ LASSERT(mutex_is_locked(&cld->cld_lock));
/*
* local copy of sptlrpc log is controlled elsewhere, don't try to
sounds like badness. It actually might be fine, as long as
we're not trying to update from the same log
simultaneously (in which case we should use a per-log sem.) */
- cfs_mutex_lock(&cld->cld_lock);
- if (cld->cld_stopping) {
- cfs_mutex_unlock(&cld->cld_lock);
+ mutex_lock(&cld->cld_lock);
+ if (cld->cld_stopping) {
+ mutex_unlock(&cld->cld_lock);
RETURN(0);
}
CDEBUG(D_MGC, "%s: configuration from log '%s' %sed (%d).\n",
mgc->obd_name, cld->cld_logname, rc ? "fail" : "succeed", rc);
- cfs_mutex_unlock(&cld->cld_lock);
+ mutex_unlock(&cld->cld_lock);
/* Now drop the lock so MGS can revoke it */
if (!rcl) {
if (rc)
goto out;
- cfs_mutex_lock(&fsdb->fsdb_mutex);
+ mutex_lock(&fsdb->fsdb_mutex);
seq_show_srpc_rules(seq, fsdb->fsdb_name, &fsdb->fsdb_srpc_gen);
- cfs_mutex_unlock(&fsdb->fsdb_mutex);
+ mutex_unlock(&fsdb->fsdb_mutex);
out:
lu_env_fini(&env);
struct mgs_tgt_srpc_conf *srpc_tgt;
int i;
- cfs_mutex_lock(&fsdb->fsdb_mutex);
+ mutex_lock(&fsdb->fsdb_mutex);
seq_printf(seq, "fsname: %s\n", fsdb->fsdb_name);
seq_printf(seq, "flags: %#lx gen: %d\n",
fsdb->fsdb_flags, fsdb->fsdb_gen);
for (i = 0; i < INDEX_MAP_SIZE * 8; i++)
- if (cfs_test_bit(i, fsdb->fsdb_mdt_index_map))
+ if (test_bit(i, fsdb->fsdb_mdt_index_map))
seq_printf(seq, "%s-MDT%04x\n", fsdb->fsdb_name, i);
for (i = 0; i < INDEX_MAP_SIZE * 8; i++)
- if (cfs_test_bit(i, fsdb->fsdb_ost_index_map))
+ if (test_bit(i, fsdb->fsdb_ost_index_map))
seq_printf(seq, "%s-OST%04x\n", fsdb->fsdb_name, i);
seq_printf(seq, "\nSecure RPC Config Rules:\n");
lprocfs_rd_ir_state(seq, fsdb);
- cfs_mutex_unlock(&fsdb->fsdb_mutex);
+ mutex_unlock(&fsdb->fsdb_mutex);
return 0;
}
struct lustre_handle lockh;
/* clear the bit before lock put */
- cfs_clear_bit(FSDB_REVOKING_LOCK, &fsdb->fsdb_flags);
+ clear_bit(FSDB_REVOKING_LOCK, &fsdb->fsdb_flags);
ldlm_lock2handle(lock, &lockh);
ldlm_lock_decref_and_cancel(&lockh, LCK_EX);
switch (type) {
case CONFIG_T_CONFIG:
cp = mgs_completion_ast_config;
- if (cfs_test_and_set_bit(FSDB_REVOKING_LOCK, &fsdb->fsdb_flags))
+ if (test_and_set_bit(FSDB_REVOKING_LOCK, &fsdb->fsdb_flags))
rc = -EALREADY;
break;
case CONFIG_T_RECOVER:
le64_to_cpu(res_id.name[1]), rc);
if (type == CONFIG_T_CONFIG)
- cfs_clear_bit(FSDB_REVOKING_LOCK,
+ clear_bit(FSDB_REVOKING_LOCK,
&fsdb->fsdb_flags);
}
/* lock has been cancelled in completion_ast. */
if (rc)
return rc;
- cfs_mutex_lock(&fsdb->fsdb_mutex);
- if (sptlrpc_rule_set_choose(&fsdb->fsdb_srpc_gen,
- LUSTRE_SP_MGC, LUSTRE_SP_MGS,
- req->rq_peer.nid,
- &flvr) == 0) {
- /* by defualt allow any flavors */
- flvr.sf_rpc = SPTLRPC_FLVR_ANY;
- }
- cfs_mutex_unlock(&fsdb->fsdb_mutex);
+ mutex_lock(&fsdb->fsdb_mutex);
+ if (sptlrpc_rule_set_choose(&fsdb->fsdb_srpc_gen,
+ LUSTRE_SP_MGC, LUSTRE_SP_MGS,
+ req->rq_peer.nid,
+ &flvr) == 0) {
+ /* by default allow any flavors */
+ flvr.sf_rpc = SPTLRPC_FLVR_ANY;
+ }
+ mutex_unlock(&fsdb->fsdb_mutex);
- cfs_spin_lock(&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
exp->exp_sp_peer = req->rq_sp_from;
exp->exp_flvr = flvr;
rc = -EACCES;
}
- cfs_spin_unlock(&exp->exp_lock);
+ spin_unlock(&exp->exp_lock);
} else {
if (exp->exp_sp_peer != req->rq_sp_from) {
CERROR("RPC source %s doesn't match %s\n",
static inline int mgs_init_export(struct obd_export *exp)
{
- struct mgs_export_data *data = &exp->u.eu_mgs_data;
+ struct mgs_export_data *data = &exp->u.eu_mgs_data;
- /* init mgs_export_data for fsc */
- cfs_spin_lock_init(&data->med_lock);
- CFS_INIT_LIST_HEAD(&data->med_clients);
+ /* init mgs_export_data for fsc */
+ spin_lock_init(&data->med_lock);
+ CFS_INIT_LIST_HEAD(&data->med_clients);
- cfs_spin_lock(&exp->exp_lock);
- exp->exp_connecting = 1;
- cfs_spin_unlock(&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
+ exp->exp_connecting = 1;
+ spin_unlock(&exp->exp_lock);
/* self-export doesn't need client data and ldlm initialization */
if (unlikely(obd_uuid_equals(&exp->exp_obd->obd_uuid,
/* Internal mgs setup */
mgs_init_fsdb_list(mgs);
- cfs_mutex_init(&mgs->mgs_mutex);
+ mutex_init(&mgs->mgs_mutex);
mgs->mgs_start_time = cfs_time_current_sec();
- cfs_spin_lock_init(&mgs->mgs_lock);
+ spin_lock_init(&mgs->mgs_lock);
/* Setup proc */
lprocfs_mgs_init_vars(&lvars);
struct mgs_nidtbl {
struct fs_db *mn_fsdb;
struct file *mn_version_file;
- cfs_mutex_t mn_lock;
+ struct mutex mn_lock;
u64 mn_version;
int mn_nr_targets;
cfs_list_t mn_targets;
struct fs_db {
char fsdb_name[9];
cfs_list_t fsdb_list; /* list of databases */
- cfs_mutex_t fsdb_mutex;
+ struct mutex fsdb_mutex;
void *fsdb_ost_index_map; /* bitmap of used indicies */
void *fsdb_mdt_index_map; /* bitmap of used indicies */
int fsdb_mdt_count;
/* async thread to notify clients */
struct mgs_device *fsdb_mgs;
cfs_waitq_t fsdb_notify_waitq;
- cfs_completion_t fsdb_notify_comp;
+ struct completion fsdb_notify_comp;
cfs_time_t fsdb_notify_start;
cfs_atomic_t fsdb_notify_phase;
volatile int fsdb_notify_async:1,
struct dt_object *mgs_configs_dir;
struct dt_object *mgs_nidtbl_dir;
cfs_list_t mgs_fs_db_list;
- cfs_spinlock_t mgs_lock; /* covers mgs_fs_db_list */
+ spinlock_t mgs_lock; /* covers mgs_fs_db_list */
cfs_proc_dir_entry_t *mgs_proc_live;
cfs_proc_dir_entry_t *mgs_proc_mntdev;
cfs_time_t mgs_start_time;
struct obd_device *mgs_obd;
struct local_oid_storage *mgs_los;
- cfs_mutex_t mgs_mutex;
+ struct mutex mgs_mutex;
};
/* this is a top object */
CDEBUG(D_MGS, "OST index for %s is %u (%s)\n",
lustre_cfg_string(lcfg, 1), index,
lustre_cfg_string(lcfg, 2));
- cfs_set_bit(index, fsdb->fsdb_ost_index_map);
+ set_bit(index, fsdb->fsdb_ost_index_map);
}
/* Figure out mdt indicies */
}
rc = 0;
CDEBUG(D_MGS, "MDT index is %u\n", index);
- cfs_set_bit(index, fsdb->fsdb_mdt_index_map);
+ set_bit(index, fsdb->fsdb_mdt_index_map);
fsdb->fsdb_mdt_count ++;
}
/*
* compat to 1.8, check osc name used by MDT0 to OSTs, bz18548.
*/
- if (!cfs_test_bit(FSDB_OSCNAME18, &fsdb->fsdb_flags) &&
+ if (!test_bit(FSDB_OSCNAME18, &fsdb->fsdb_flags) &&
lcfg->lcfg_command == LCFG_ATTACH &&
strcmp(lustre_cfg_string(lcfg, 1), LUSTRE_OSC_NAME) == 0) {
if (OBD_OCD_VERSION_MAJOR(d->ver) == 1 &&
OBD_OCD_VERSION_MINOR(d->ver) <= 8) {
CWARN("MDT using 1.8 OSC name scheme\n");
- cfs_set_bit(FSDB_OSCNAME18, &fsdb->fsdb_flags);
+ set_bit(FSDB_OSCNAME18, &fsdb->fsdb_flags);
}
}
GOTO(out_close, rc);
if (llog_get_size(loghandle) <= 1)
- cfs_set_bit(FSDB_LOG_EMPTY, &fsdb->fsdb_flags);
+ set_bit(FSDB_LOG_EMPTY, &fsdb->fsdb_flags);
rc = llog_process(env, loghandle, mgs_fsdb_handler, (void *)&d, NULL);
CDEBUG(D_INFO, "get_db = %d\n", rc);
RETURN(NULL);
strcpy(fsdb->fsdb_name, fsname);
- cfs_mutex_init(&fsdb->fsdb_mutex);
- cfs_set_bit(FSDB_UDESC, &fsdb->fsdb_flags);
+ mutex_init(&fsdb->fsdb_mutex);
+ set_bit(FSDB_UDESC, &fsdb->fsdb_flags);
fsdb->fsdb_gen = 1;
if (strcmp(fsname, MGSSELF_NAME) == 0) {
- cfs_set_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags);
+ set_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags);
} else {
OBD_ALLOC(fsdb->fsdb_ost_index_map, INDEX_MAP_SIZE);
OBD_ALLOC(fsdb->fsdb_mdt_index_map, INDEX_MAP_SIZE);
static void mgs_free_fsdb(struct mgs_device *mgs, struct fs_db *fsdb)
{
/* wait for anyone with the sem */
- cfs_mutex_lock(&fsdb->fsdb_mutex);
+ mutex_lock(&fsdb->fsdb_mutex);
lproc_mgs_del_live(mgs, fsdb);
cfs_list_del(&fsdb->fsdb_list);
name_destroy(&fsdb->fsdb_clilov);
name_destroy(&fsdb->fsdb_clilmv);
mgs_free_fsdb_srpc(fsdb);
- cfs_mutex_unlock(&fsdb->fsdb_mutex);
+ mutex_unlock(&fsdb->fsdb_mutex);
OBD_FREE_PTR(fsdb);
}
{
struct fs_db *fsdb;
cfs_list_t *tmp, *tmp2;
- cfs_mutex_lock(&mgs->mgs_mutex);
+ mutex_lock(&mgs->mgs_mutex);
cfs_list_for_each_safe(tmp, tmp2, &mgs->mgs_fs_db_list) {
fsdb = cfs_list_entry(tmp, struct fs_db, fsdb_list);
mgs_free_fsdb(mgs, fsdb);
}
- cfs_mutex_unlock(&mgs->mgs_mutex);
+ mutex_unlock(&mgs->mgs_mutex);
return 0;
}
int rc = 0;
ENTRY;
- cfs_mutex_lock(&mgs->mgs_mutex);
+ mutex_lock(&mgs->mgs_mutex);
fsdb = mgs_find_fsdb(mgs, name);
if (fsdb) {
- cfs_mutex_unlock(&mgs->mgs_mutex);
+ mutex_unlock(&mgs->mgs_mutex);
*dbh = fsdb;
RETURN(0);
}
fsdb = mgs_new_fsdb(env, mgs, name);
/* lock fsdb_mutex until the db is loaded from llogs */
if (fsdb)
- cfs_mutex_lock(&fsdb->fsdb_mutex);
- cfs_mutex_unlock(&mgs->mgs_mutex);
+ mutex_lock(&fsdb->fsdb_mutex);
+ mutex_unlock(&mgs->mgs_mutex);
if (!fsdb)
RETURN(-ENOMEM);
- if (!cfs_test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags)) {
+ if (!test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags)) {
/* populate the db from the client llog */
rc = mgs_get_fsdb_from_llog(env, mgs, fsdb);
if (rc) {
GOTO(out_free, rc);
}
- cfs_mutex_unlock(&fsdb->fsdb_mutex);
+ mutex_unlock(&fsdb->fsdb_mutex);
*dbh = fsdb;
RETURN(0);
out_free:
- cfs_mutex_unlock(&fsdb->fsdb_mutex);
+ mutex_unlock(&fsdb->fsdb_mutex);
mgs_free_fsdb(mgs, fsdb);
return rc;
}
RETURN(rc);
}
- if (cfs_test_bit(FSDB_LOG_EMPTY, &fsdb->fsdb_flags))
+ if (test_bit(FSDB_LOG_EMPTY, &fsdb->fsdb_flags))
RETURN(-1);
if (mti->mti_flags & LDD_F_SV_TYPE_OST)
else
RETURN(-EINVAL);
- if (cfs_test_bit(mti->mti_stripe_index, imap))
+ if (test_bit(mti->mti_stripe_index, imap))
RETURN(1);
RETURN(0);
}
{
int i;
for (i = 0; i < map_len * 8; i++)
- if (!cfs_test_bit(i, index_map)) {
+ if (!test_bit(i, index_map)) {
return i;
}
CERROR("max index %d exceeded.\n", i);
RETURN(rc);
}
- cfs_mutex_lock(&fsdb->fsdb_mutex);
+ mutex_lock(&fsdb->fsdb_mutex);
if (mti->mti_flags & LDD_F_SV_TYPE_OST) {
imap = fsdb->fsdb_ost_index_map;
} else if (mti->mti_flags & LDD_F_SV_TYPE_MDT) {
GOTO(out_up, rc = -ERANGE);
}
- if (cfs_test_bit(mti->mti_stripe_index, imap)) {
+ if (test_bit(mti->mti_stripe_index, imap)) {
if ((mti->mti_flags & LDD_F_VIRGIN) &&
!(mti->mti_flags & LDD_F_WRITECONF)) {
LCONSOLE_ERROR_MSG(0x140, "Server %s requested index "
}
}
- cfs_set_bit(mti->mti_stripe_index, imap);
- cfs_clear_bit(FSDB_LOG_EMPTY, &fsdb->fsdb_flags);
- cfs_mutex_unlock(&fsdb->fsdb_mutex);
+ set_bit(mti->mti_stripe_index, imap);
+ clear_bit(FSDB_LOG_EMPTY, &fsdb->fsdb_flags);
+ mutex_unlock(&fsdb->fsdb_mutex);
server_make_name(mti->mti_flags & ~(LDD_F_VIRGIN | LDD_F_WRITECONF),
mti->mti_stripe_index, mti->mti_fsname, mti->mti_svname);
RETURN(0);
out_up:
- cfs_mutex_unlock(&fsdb->fsdb_mutex);
+ mutex_unlock(&fsdb->fsdb_mutex);
return rc;
}
ENTRY;
- LASSERT(cfs_mutex_is_locked(&fsdb->fsdb_mutex));
+ LASSERT(mutex_is_locked(&fsdb->fsdb_mutex));
CDEBUG(D_MGS, "modify %s/%s/%s fl=%x\n", logname, devname, comment,
flags);
if (rc)
return rc;
/* COMPAT_180 */
- if (i == 0 && cfs_test_bit(FSDB_OSCNAME18, &fsdb->fsdb_flags))
+ if (i == 0 && test_bit(FSDB_OSCNAME18, &fsdb->fsdb_flags))
rc = name_create(lovname, fsdb->fsdb_name, "-mdtlov");
else
rc = name_create(lovname, *logname, "-mdtlov");
{
char suffix[16];
- if (i == 0 && cfs_test_bit(FSDB_OSCNAME18, &fsdb->fsdb_flags))
+ if (i == 0 && test_bit(FSDB_OSCNAME18, &fsdb->fsdb_flags))
sprintf(suffix, "-osc");
else
sprintf(suffix, "-osc-MDT%04x", i);
for (i = 0; i < INDEX_MAP_SIZE * 8; i++){
char *mdtname;
if (i != mti->mti_stripe_index &&
- cfs_test_bit(i, fsdb->fsdb_mdt_index_map)) {
+ test_bit(i, fsdb->fsdb_mdt_index_map)) {
rc = name_create_mdt(&mdtname, mti->mti_fsname, i);
if (rc)
GOTO(out_end, rc);
GOTO(out_free, rc);
/* for the system upgraded from old 1.8, keep using the old osc naming
* style for mdt, see name_create_mdt_osc(). LU-1257 */
- if (cfs_test_bit(FSDB_OSCNAME18, &fsdb->fsdb_flags))
+ if (test_bit(FSDB_OSCNAME18, &fsdb->fsdb_flags))
rc = name_create(&oscname, svname, "");
else
rc = name_create(&oscname, svname, suffix);
/* We also have to update the other logs where this osc is part of
the lov */
- if (cfs_test_bit(FSDB_OLDLOG14, &fsdb->fsdb_flags)) {
+ if (test_bit(FSDB_OLDLOG14, &fsdb->fsdb_flags)) {
/* If we're upgrading, the old mdt log already has our
entry. Let's do a fake one for fun. */
/* Note that we can't add any new failnids, since we don't
/* Add ost to all MDT lov defs */
for (i = 0; i < INDEX_MAP_SIZE * 8; i++){
- if (cfs_test_bit(i, fsdb->fsdb_mdt_index_map)) {
+ if (test_bit(i, fsdb->fsdb_mdt_index_map)) {
char mdt_index[9];
rc = name_create_mdt_and_lov(&logname, &lovname, fsdb,
int i;
for (i = 0; i < INDEX_MAP_SIZE * 8; i++) {
- if (!cfs_test_bit(i, fsdb->fsdb_mdt_index_map))
+ if (!test_bit(i, fsdb->fsdb_mdt_index_map))
continue;
rc = name_create_mdt(&logname, mti->mti_fsname, i);
if (rc)
goto error_out;
if (strcmp(ptr, "yes") == 0) {
- cfs_set_bit(FSDB_UDESC, &fsdb->fsdb_flags);
+ set_bit(FSDB_UDESC, &fsdb->fsdb_flags);
CWARN("Enable user descriptor shipping from client to MDT\n");
} else if (strcmp(ptr, "no") == 0) {
- cfs_clear_bit(FSDB_UDESC, &fsdb->fsdb_flags);
+ clear_bit(FSDB_UDESC, &fsdb->fsdb_flags);
CWARN("Disable user descriptor shipping from client to MDT\n");
} else {
*(ptr - 1) = '=';
RETURN(rc);
/* mgs rules implies must be mgc->mgs */
- if (cfs_test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags)) {
+ if (test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags)) {
if ((rule.sr_from != LUSTRE_SP_MGC &&
rule.sr_from != LUSTRE_SP_ANY) ||
(rule.sr_to != LUSTRE_SP_MGS &&
if (rc)
goto out_free;
- if (cfs_test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags)) {
+ if (test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags)) {
/*
* for mgs rules, make them effective immediately.
*/
/* Modify mdtlov */
/* Add to all MDT logs for CMD */
for (i = 0; i < INDEX_MAP_SIZE * 8; i++) {
- if (!cfs_test_bit(i, fsdb->fsdb_mdt_index_map))
+ if (!test_bit(i, fsdb->fsdb_mdt_index_map))
continue;
rc = name_create_mdt(&logname, mti->mti_fsname, i);
if (rc)
"changes were made to the "
"config log.\n",
mti->mti_svname, rc);
- if (cfs_test_bit(FSDB_OLDLOG14, &fsdb->fsdb_flags))
+ if (test_bit(FSDB_OLDLOG14, &fsdb->fsdb_flags))
LCONSOLE_ERROR_MSG(0x146, "This may be"
" because the log"
"is in the old 1.4"
(class_match_param(ptr, PARAM_LLITE, NULL) == 0)) {
char *cname;
- if (cfs_test_bit(FSDB_OLDLOG14, &fsdb->fsdb_flags)) {
+ if (test_bit(FSDB_OLDLOG14, &fsdb->fsdb_flags)) {
LCONSOLE_ERROR_MSG(0x148, "Upgraded client logs for %s"
" cannot be modified. Consider"
" updating the configuration with"
int i;
for (i = 0; i < INDEX_MAP_SIZE * 8; i++){
- if (!cfs_test_bit(i, fsdb->fsdb_mdt_index_map))
+ if (!test_bit(i, fsdb->fsdb_mdt_index_map))
continue;
name_destroy(&cname);
rc = name_create_mdt_osc(&cname, mti->mti_svname,
goto active_err;
if (rc & LDD_F_SV_ALL) {
for (i = 0; i < INDEX_MAP_SIZE * 8; i++) {
- if (!cfs_test_bit(i,
+ if (!test_bit(i,
fsdb->fsdb_mdt_index_map))
continue;
rc = name_create_mdt(&logname,
the failover list. Modify mti->params for rewriting back at
server_register_target(). */
- cfs_mutex_lock(&fsdb->fsdb_mutex);
+ mutex_lock(&fsdb->fsdb_mutex);
rc = mgs_write_log_add_failnid(obd, fsdb, mti);
- cfs_mutex_unlock(&fsdb->fsdb_mutex);
+ mutex_unlock(&fsdb->fsdb_mutex);
RETURN(rc);
#endif
mti->mti_flags &= ~LDD_F_UPDATE;
}
- cfs_mutex_lock(&fsdb->fsdb_mutex);
+ mutex_lock(&fsdb->fsdb_mutex);
if (mti->mti_flags &
(LDD_F_VIRGIN | LDD_F_UPGRADE14 | LDD_F_WRITECONF)) {
OBD_FREE(buf, strlen(mti->mti_params) + 1);
out_up:
- cfs_mutex_unlock(&fsdb->fsdb_mutex);
+ mutex_unlock(&fsdb->fsdb_mutex);
RETURN(rc);
}
if (rc)
RETURN(rc);
- cfs_mutex_lock(&mgs->mgs_mutex);
+ mutex_lock(&mgs->mgs_mutex);
/* Delete the fs db */
fsdb = mgs_find_fsdb(mgs, fsname);
if (fsdb)
mgs_free_fsdb(mgs, fsdb);
- cfs_mutex_unlock(&mgs->mgs_mutex);
+ mutex_unlock(&mgs->mgs_mutex);
cfs_list_for_each_entry_safe(dirent, n, &list, list) {
cfs_list_del(&dirent->list);
rc = mgs_find_or_make_fsdb(env, mgs, fsname, &fsdb);
if (rc)
RETURN(rc);
- if (!cfs_test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags) &&
- cfs_test_bit(FSDB_LOG_EMPTY, &fsdb->fsdb_flags)) {
+ if (!test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags) &&
+ test_bit(FSDB_LOG_EMPTY, &fsdb->fsdb_flags)) {
CERROR("No filesystem targets for %s. cfg_device from lctl "
"is '%s'\n", fsname, devname);
mgs_free_fsdb(mgs, fsdb);
mti->mti_flags = rc | LDD_F_PARAM;
- cfs_mutex_lock(&fsdb->fsdb_mutex);
+ mutex_lock(&fsdb->fsdb_mutex);
rc = mgs_write_log_param(env, mgs, fsdb, mti, mti->mti_params);
- cfs_mutex_unlock(&fsdb->fsdb_mutex);
+ mutex_unlock(&fsdb->fsdb_mutex);
/*
* Revoke lock so everyone updates. Should be alright if
CERROR("Can't get db for %s\n", fsname);
RETURN(rc);
}
- if (cfs_test_bit(FSDB_LOG_EMPTY, &fsdb->fsdb_flags)) {
+ if (test_bit(FSDB_LOG_EMPTY, &fsdb->fsdb_flags)) {
CERROR("%s is not defined\n", fsname);
mgs_free_fsdb(mgs, fsdb);
RETURN(-EINVAL);
break;
}
- cfs_mutex_lock(&fsdb->fsdb_mutex);
+ mutex_lock(&fsdb->fsdb_mutex);
if (canceled_label != NULL) {
OBD_ALLOC_PTR(mti);
/* write pool def to all MDT logs */
for (i = 0; i < INDEX_MAP_SIZE * 8; i++) {
- if (cfs_test_bit(i, fsdb->fsdb_mdt_index_map)) {
+ if (test_bit(i, fsdb->fsdb_mdt_index_map)) {
rc = name_create_mdt_and_lov(&logname, &lovname,
fsdb, i);
if (rc) {
- cfs_mutex_unlock(&fsdb->fsdb_mutex);
+ mutex_unlock(&fsdb->fsdb_mutex);
GOTO(out_mti, rc);
}
if (canceled_label != NULL) {
name_destroy(&logname);
name_destroy(&lovname);
if (rc) {
- cfs_mutex_unlock(&fsdb->fsdb_mutex);
+ mutex_unlock(&fsdb->fsdb_mutex);
GOTO(out_mti, rc);
}
}
rc = name_create(&logname, fsname, "-client");
if (rc) {
- cfs_mutex_unlock(&fsdb->fsdb_mutex);
+ mutex_unlock(&fsdb->fsdb_mutex);
GOTO(out_mti, rc);
}
if (canceled_label != NULL) {
rc = mgs_modify(env, mgs, fsdb, mti, logname,
fsdb->fsdb_clilov, canceled_label, CM_SKIP);
if (rc < 0) {
- cfs_mutex_unlock(&fsdb->fsdb_mutex);
+ mutex_unlock(&fsdb->fsdb_mutex);
name_destroy(&logname);
GOTO(out_mti, rc);
}
rc = mgs_write_log_pool(env, mgs, logname, fsdb, fsdb->fsdb_clilov,
cmd, fsname, poolname, ostname, label);
- cfs_mutex_unlock(&fsdb->fsdb_mutex);
+ mutex_unlock(&fsdb->fsdb_mutex);
name_destroy(&logname);
/* request for update */
mgs_revoke_lock(mgs, fsdb, CONFIG_T_CONFIG);
struct mgs_nidtbl_target *tgt;
int version = 0;
- LASSERT(cfs_mutex_is_locked(&tbl->mn_lock));
+ LASSERT(mutex_is_locked(&tbl->mn_lock));
cfs_list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
if (!tgt->mnt_version)
continue;
LASSERT((unit_size & (unit_size - 1)) == 0);
LASSERT(nrpages << CFS_PAGE_SHIFT >= units_total * unit_size);
- cfs_mutex_lock(&tbl->mn_lock);
+ mutex_lock(&tbl->mn_lock);
LASSERT(nidtbl_is_sane(tbl));
/* no more entries ? */
LASSERT(version <= tbl->mn_version);
res->mcr_size = tbl->mn_version;
res->mcr_offset = nobuf ? version : tbl->mn_version;
- cfs_mutex_unlock(&tbl->mn_lock);
+ mutex_unlock(&tbl->mn_lock);
LASSERT(ergo(version == 1, rc == 0)); /* get the log first time */
CDEBUG(D_MGS, "Read IR logs %s return with %d, version %llu\n",
int rc;
ENTRY;
- LASSERT(cfs_mutex_is_locked(&tbl->mn_lock));
+ LASSERT(mutex_is_locked(&tbl->mn_lock));
fsdb = local_file_find_or_create(env, mgs->mgs_los, mgs->mgs_nidtbl_dir,
tbl->mn_fsdb->fsdb_name,
int rc;
ENTRY;
- LASSERT(cfs_mutex_is_locked(&tbl->mn_lock));
+ LASSERT(mutex_is_locked(&tbl->mn_lock));
LASSERT(mgs->mgs_nidtbl_dir);
rc = dt_lookup_dir(env, mgs->mgs_nidtbl_dir, tbl->mn_fsdb->fsdb_name,
LASSERT(type != 0);
tbl = &fsdb->fsdb_nidtbl;
- cfs_mutex_lock(&tbl->mn_lock);
+ mutex_lock(&tbl->mn_lock);
cfs_list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
struct mgs_target_info *info = &tgt->mnt_mti;
if (type == tgt->mnt_type &&
EXIT;
out:
- cfs_mutex_unlock(&tbl->mn_lock);
+ mutex_unlock(&tbl->mn_lock);
if (rc)
CERROR("Write NID table version for file system %s error %d\n",
fsdb->fsdb_name, rc);
struct mgs_nidtbl *tbl = &fsdb->fsdb_nidtbl;
CFS_LIST_HEAD(head);
- cfs_mutex_lock(&tbl->mn_lock);
+ mutex_lock(&tbl->mn_lock);
tbl->mn_nr_targets = 0;
cfs_list_splice_init(&tbl->mn_targets, &head);
- cfs_mutex_unlock(&tbl->mn_lock);
+ mutex_unlock(&tbl->mn_lock);
while (!cfs_list_empty(&head)) {
struct mgs_nidtbl_target *tgt;
int rc;
CFS_INIT_LIST_HEAD(&tbl->mn_targets);
- cfs_mutex_init(&tbl->mn_lock);
+ mutex_init(&tbl->mn_lock);
tbl->mn_nr_targets = 0;
tbl->mn_fsdb = fsdb;
- cfs_mutex_lock(&tbl->mn_lock);
+ mutex_lock(&tbl->mn_lock);
rc = nidtbl_read_version(env, fsdb->fsdb_mgs, tbl, &tbl->mn_version);
- cfs_mutex_unlock(&tbl->mn_lock);
+ mutex_unlock(&tbl->mn_lock);
if (rc < 0)
CERROR("%s: IR: failed to read current version, rc = %d\n",
fsdb->fsdb_mgs->mgs_obd->obd_name, rc);
sprintf(name, "mgs_%s_notify", fsdb->fsdb_name);
cfs_daemonize(name);
- cfs_complete(&fsdb->fsdb_notify_comp);
+ complete(&fsdb->fsdb_notify_comp);
set_user_nice(current, -2);
mgs_revoke_lock(fsdb->fsdb_mgs, fsdb, CONFIG_T_RECOVER);
}
- cfs_complete(&fsdb->fsdb_notify_comp);
+ complete(&fsdb->fsdb_notify_comp);
return 0;
}
fsdb->fsdb_mgs = mgs;
cfs_atomic_set(&fsdb->fsdb_notify_phase, 0);
cfs_waitq_init(&fsdb->fsdb_notify_waitq);
- cfs_init_completion(&fsdb->fsdb_notify_comp);
+ init_completion(&fsdb->fsdb_notify_comp);
rc = cfs_create_thread(mgs_ir_notify, fsdb, CFS_DAEMON_FLAGS);
if (rc > 0)
- cfs_wait_for_completion(&fsdb->fsdb_notify_comp);
+ wait_for_completion(&fsdb->fsdb_notify_comp);
else
CERROR("Start notify thread error %d\n", rc);
void mgs_ir_fini_fs(struct mgs_device *mgs, struct fs_db *fsdb)
{
- if (cfs_test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags))
+ if (test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags))
return;
mgs_fsc_cleanup_by_fsdb(fsdb);
fsdb->fsdb_notify_stop = 1;
cfs_waitq_signal(&fsdb->fsdb_notify_waitq);
- cfs_wait_for_completion(&fsdb->fsdb_notify_comp);
+ wait_for_completion(&fsdb->fsdb_notify_comp);
}
/* caller must have held fsdb_mutex */
return rc;
/* check ir state */
- cfs_mutex_lock(&fsdb->fsdb_mutex);
+ mutex_lock(&fsdb->fsdb_mutex);
ir_state_graduate(fsdb);
switch (fsdb->fsdb_ir_state) {
case IR_FULL:
default:
LBUG();
}
- cfs_mutex_unlock(&fsdb->fsdb_mutex);
+ mutex_unlock(&fsdb->fsdb_mutex);
LASSERT(ergo(mti->mti_flags & LDD_F_IR_CAPABLE, notify));
if (notify) {
CDEBUG(D_MGS, "change fsr state of %s from %s to %s\n",
fsdb->fsdb_name, strings[fsdb->fsdb_ir_state], strings[state]);
- cfs_mutex_lock(&fsdb->fsdb_mutex);
+ mutex_lock(&fsdb->fsdb_mutex);
if (state == IR_FULL && fsdb->fsdb_nonir_clients)
state = IR_PARTIAL;
fsdb->fsdb_ir_state = state;
- cfs_mutex_unlock(&fsdb->fsdb_mutex);
+ mutex_unlock(&fsdb->fsdb_mutex);
return 0;
}
!!(exp->exp_connect_flags & OBD_CONNECT_IMP_RECOV);
rc = -EEXIST;
- cfs_mutex_lock(&fsdb->fsdb_mutex);
+ mutex_lock(&fsdb->fsdb_mutex);
- /* tend to find it in export list because this list is shorter. */
- cfs_spin_lock(&data->med_lock);
+ /* search the export list first because it is shorter. */
+ spin_lock(&data->med_lock);
cfs_list_for_each_entry(fsc, &data->med_clients, mfc_export_list) {
if (strcmp(fsname, fsc->mfc_fsdb->fsdb_name) == 0) {
found = true;
}
rc = 0;
}
- cfs_spin_unlock(&data->med_lock);
- cfs_mutex_unlock(&fsdb->fsdb_mutex);
+ spin_unlock(&data->med_lock);
+ mutex_unlock(&fsdb->fsdb_mutex);
if (new_fsc) {
class_export_put(new_fsc->mfc_export);
void mgs_fsc_cleanup(struct obd_export *exp)
{
- struct mgs_export_data *data = &exp->u.eu_mgs_data;
- struct mgs_fsc *fsc, *tmp;
- CFS_LIST_HEAD(head);
-
- cfs_spin_lock(&data->med_lock);
- cfs_list_splice_init(&data->med_clients, &head);
- cfs_spin_unlock(&data->med_lock);
-
- cfs_list_for_each_entry_safe(fsc, tmp, &head, mfc_export_list) {
- struct fs_db *fsdb = fsc->mfc_fsdb;
-
- LASSERT(fsc->mfc_export == exp);
-
- cfs_mutex_lock(&fsdb->fsdb_mutex);
- cfs_list_del_init(&fsc->mfc_fsdb_list);
- if (fsc->mfc_ir_capable == 0) {
- --fsdb->fsdb_nonir_clients;
- LASSERT(fsdb->fsdb_ir_state != IR_FULL);
- if (fsdb->fsdb_nonir_clients == 0 &&
- fsdb->fsdb_ir_state == IR_PARTIAL)
- fsdb->fsdb_ir_state = IR_FULL;
- }
- cfs_mutex_unlock(&fsdb->fsdb_mutex);
- cfs_list_del_init(&fsc->mfc_export_list);
- class_export_put(fsc->mfc_export);
- OBD_FREE_PTR(fsc);
- }
+ struct mgs_export_data *data = &exp->u.eu_mgs_data;
+ struct mgs_fsc *fsc, *tmp;
+ CFS_LIST_HEAD(head);
+
+ spin_lock(&data->med_lock);
+ cfs_list_splice_init(&data->med_clients, &head);
+ spin_unlock(&data->med_lock);
+
+ cfs_list_for_each_entry_safe(fsc, tmp, &head, mfc_export_list) {
+ struct fs_db *fsdb = fsc->mfc_fsdb;
+
+ LASSERT(fsc->mfc_export == exp);
+
+ mutex_lock(&fsdb->fsdb_mutex);
+ cfs_list_del_init(&fsc->mfc_fsdb_list);
+ if (fsc->mfc_ir_capable == 0) {
+ --fsdb->fsdb_nonir_clients;
+ LASSERT(fsdb->fsdb_ir_state != IR_FULL);
+ if (fsdb->fsdb_nonir_clients == 0 &&
+ fsdb->fsdb_ir_state == IR_PARTIAL)
+ fsdb->fsdb_ir_state = IR_FULL;
+ }
+ mutex_unlock(&fsdb->fsdb_mutex);
+ cfs_list_del_init(&fsc->mfc_export_list);
+ class_export_put(fsc->mfc_export);
+ OBD_FREE_PTR(fsc);
+ }
}
/* must be called with fsdb->fsdb_mutex held */
LASSERT(fsdb == fsc->mfc_fsdb);
cfs_list_del_init(&fsc->mfc_fsdb_list);
- cfs_spin_lock(&data->med_lock);
- cfs_list_del_init(&fsc->mfc_export_list);
- cfs_spin_unlock(&data->med_lock);
+ spin_lock(&data->med_lock);
+ cfs_list_del_init(&fsc->mfc_export_list);
+ spin_unlock(&data->med_lock);
class_export_put(fsc->mfc_export);
OBD_FREE_PTR(fsc);
}
void cleanup_capa_hash(cfs_hlist_head_t *hash)
{
- int i;
- cfs_hlist_node_t *pos, *next;
- struct obd_capa *oc;
-
- cfs_spin_lock(&capa_lock);
- for (i = 0; i < NR_CAPAHASH; i++) {
- cfs_hlist_for_each_entry_safe(oc, pos, next, hash + i,
- u.tgt.c_hash)
- capa_delete(oc);
- }
- cfs_spin_unlock(&capa_lock);
+ int i;
+ cfs_hlist_node_t *pos, *next;
+ struct obd_capa *oc;
+
+ spin_lock(&capa_lock);
+ for (i = 0; i < NR_CAPAHASH; i++) {
+ cfs_hlist_for_each_entry_safe(oc, pos, next, hash + i,
+ u.tgt.c_hash)
+ capa_delete(oc);
+ }
+ spin_unlock(&capa_lock);
- OBD_FREE(hash, CFS_PAGE_SIZE);
+ OBD_FREE(hash, CFS_PAGE_SIZE);
}
EXPORT_SYMBOL(cleanup_capa_hash);
if (IS_ERR(ocapa))
return NULL;
- cfs_spin_lock(&capa_lock);
+ spin_lock(&capa_lock);
old = find_capa(capa, head, 0);
if (!old) {
ocapa->c_capa = *capa;
capa_count[CAPA_SITE_SERVER]++;
if (capa_count[CAPA_SITE_SERVER] > CAPA_HASH_SIZE)
capa_delete_lru(list);
- cfs_spin_unlock(&capa_lock);
- return ocapa;
- } else {
- capa_get(old);
- cfs_spin_unlock(&capa_lock);
- capa_put(ocapa);
- return old;
- }
+ spin_unlock(&capa_lock);
+ return ocapa;
+ } else {
+ capa_get(old);
+ spin_unlock(&capa_lock);
+ capa_put(ocapa);
+ return old;
+ }
}
EXPORT_SYMBOL(capa_add);
struct obd_capa *capa_lookup(cfs_hlist_head_t *hash, struct lustre_capa *capa,
- int alive)
+ int alive)
{
- struct obd_capa *ocapa;
-
- cfs_spin_lock(&capa_lock);
- ocapa = find_capa(capa, hash + capa_hashfn(&capa->lc_fid), alive);
- if (ocapa) {
- cfs_list_move_tail(&ocapa->c_list,
- &capa_list[CAPA_SITE_SERVER]);
- capa_get(ocapa);
- }
- cfs_spin_unlock(&capa_lock);
+ struct obd_capa *ocapa;
+
+ spin_lock(&capa_lock);
+ ocapa = find_capa(capa, hash + capa_hashfn(&capa->lc_fid), alive);
+ if (ocapa) {
+ cfs_list_move_tail(&ocapa->c_list,
+ &capa_list[CAPA_SITE_SERVER]);
+ capa_get(ocapa);
+ }
+ spin_unlock(&capa_lock);
- return ocapa;
+ return ocapa;
}
EXPORT_SYMBOL(capa_lookup);
void capa_cpy(void *capa, struct obd_capa *ocapa)
{
- cfs_spin_lock(&ocapa->c_lock);
- *(struct lustre_capa *)capa = ocapa->c_capa;
- cfs_spin_unlock(&ocapa->c_lock);
+ spin_lock(&ocapa->c_lock);
+ *(struct lustre_capa *)capa = ocapa->c_capa;
+ spin_unlock(&ocapa->c_lock);
}
EXPORT_SYMBOL(capa_cpy);
LASSERT(page->cp_owner != NULL);
LINVRNT(plist->pl_owner == cfs_current());
- cfs_lockdep_off();
- cfs_mutex_lock(&page->cp_mutex);
- cfs_lockdep_on();
+ lockdep_off();
+ mutex_lock(&page->cp_mutex);
+ lockdep_on();
LASSERT(cfs_list_empty(&page->cp_batch));
cfs_list_add_tail(&page->cp_batch, &plist->pl_pages);
++plist->pl_nr;
ENTRY;
cfs_list_del_init(&page->cp_batch);
- cfs_lockdep_off();
- cfs_mutex_unlock(&page->cp_mutex);
- cfs_lockdep_on();
+ lockdep_off();
+ mutex_unlock(&page->cp_mutex);
+ lockdep_on();
--plist->pl_nr;
lu_ref_del_at(&page->cp_reference, page->cp_queue_ref, "queue", plist);
cl_page_put(env, page);
LASSERT(plist->pl_nr > 0);
cfs_list_del_init(&page->cp_batch);
- cfs_lockdep_off();
- cfs_mutex_unlock(&page->cp_mutex);
- cfs_lockdep_on();
+ lockdep_off();
+ mutex_unlock(&page->cp_mutex);
+ lockdep_on();
--plist->pl_nr;
/*
* cl_page_disown0 rather than usual cl_page_disown() is used,
#include "cl_internal.h"
/** Lock class of cl_lock::cll_guard */
-static cfs_lock_class_key_t cl_lock_guard_class;
+static struct lock_class_key cl_lock_guard_class;
static cfs_mem_cache_t *cl_lock_kmem;
static struct lu_kmem_descr cl_lock_caches[] = {
#define RETIP ((unsigned long)__builtin_return_address(0))
#ifdef CONFIG_LOCKDEP
-static cfs_lock_class_key_t cl_lock_key;
+static struct lock_class_key cl_lock_key;
static void cl_lock_lockdep_init(struct cl_lock *lock)
{
cl_object_put(env, obj);
lu_ref_fini(&lock->cll_reference);
lu_ref_fini(&lock->cll_holders);
- cfs_mutex_destroy(&lock->cll_guard);
+ mutex_destroy(&lock->cll_guard);
OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
EXIT;
}
CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
lu_ref_init(&lock->cll_reference);
lu_ref_init(&lock->cll_holders);
- cfs_mutex_init(&lock->cll_guard);
- cfs_lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
+ mutex_init(&lock->cll_guard);
+ lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
cfs_waitq_init(&lock->cll_wq);
head = obj->co_lu.lo_header;
cfs_atomic_inc(&site->cs_locks_state[CLS_NEW]);
head = cl_object_header(obj);
site = cl_object_site(obj);
- cfs_spin_lock(&head->coh_lock_guard);
- lock = cl_lock_lookup(env, obj, io, need);
- cfs_spin_unlock(&head->coh_lock_guard);
-
- if (lock == NULL) {
- lock = cl_lock_alloc(env, obj, io, need);
- if (!IS_ERR(lock)) {
- struct cl_lock *ghost;
-
- cfs_spin_lock(&head->coh_lock_guard);
- ghost = cl_lock_lookup(env, obj, io, need);
- if (ghost == NULL) {
- cfs_list_add_tail(&lock->cll_linkage,
- &head->coh_locks);
- cfs_spin_unlock(&head->coh_lock_guard);
- cfs_atomic_inc(&site->cs_locks.cs_busy);
- } else {
- cfs_spin_unlock(&head->coh_lock_guard);
+ spin_lock(&head->coh_lock_guard);
+ lock = cl_lock_lookup(env, obj, io, need);
+ spin_unlock(&head->coh_lock_guard);
+
+ if (lock == NULL) {
+ lock = cl_lock_alloc(env, obj, io, need);
+ if (!IS_ERR(lock)) {
+ struct cl_lock *ghost;
+
+ spin_lock(&head->coh_lock_guard);
+ ghost = cl_lock_lookup(env, obj, io, need);
+ if (ghost == NULL) {
+ cfs_list_add_tail(&lock->cll_linkage,
+ &head->coh_locks);
+ spin_unlock(&head->coh_lock_guard);
+ cfs_atomic_inc(&site->cs_locks.cs_busy);
+ } else {
+ spin_unlock(&head->coh_lock_guard);
/*
* Other threads can acquire references to the
* top-lock through its sub-locks. Hence, it
head = cl_object_header(obj);
do {
- cfs_spin_lock(&head->coh_lock_guard);
+ spin_lock(&head->coh_lock_guard);
lock = cl_lock_lookup(env, obj, io, need);
- cfs_spin_unlock(&head->coh_lock_guard);
+ spin_unlock(&head->coh_lock_guard);
if (lock == NULL)
return NULL;
info = cl_env_info(env);
for (i = 0; i < hdr->coh_nesting; ++i)
LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
- cfs_mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
+ mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
lock->cll_guarder = cfs_current();
LINVRNT(lock->cll_depth == 0);
}
if (lock->cll_guarder == cfs_current()) {
LINVRNT(lock->cll_depth > 0);
cl_lock_mutex_tail(env, lock);
- } else if (cfs_mutex_trylock(&lock->cll_guard)) {
+ } else if (mutex_trylock(&lock->cll_guard)) {
LINVRNT(lock->cll_depth == 0);
lock->cll_guarder = cfs_current();
cl_lock_mutex_tail(env, lock);
counters->ctc_nr_locks_locked--;
if (--lock->cll_depth == 0) {
lock->cll_guarder = NULL;
- cfs_mutex_unlock(&lock->cll_guard);
+ mutex_unlock(&lock->cll_guard);
}
}
EXPORT_SYMBOL(cl_lock_mutex_put);
head = cl_object_header(lock->cll_descr.cld_obj);
- cfs_spin_lock(&head->coh_lock_guard);
- cfs_list_del_init(&lock->cll_linkage);
- cfs_spin_unlock(&head->coh_lock_guard);
+ spin_lock(&head->coh_lock_guard);
+ cfs_list_del_init(&lock->cll_linkage);
+ spin_unlock(&head->coh_lock_guard);
/*
* From now on, no new references to this lock can be acquired
* now. If locks were indexed according to their extent and/or mode,
* that index would have to be updated here.
*/
- cfs_spin_lock(&hdr->coh_lock_guard);
- lock->cll_descr = *desc;
- cfs_spin_unlock(&hdr->coh_lock_guard);
- RETURN(0);
+ spin_lock(&hdr->coh_lock_guard);
+ lock->cll_descr = *desc;
+ spin_unlock(&hdr->coh_lock_guard);
+ RETURN(0);
}
EXPORT_SYMBOL(cl_lock_modify);
need->cld_start = need->cld_end = index;
need->cld_enq_flags = 0;
- cfs_spin_lock(&head->coh_lock_guard);
+ spin_lock(&head->coh_lock_guard);
/* It is fine to match any group lock since there could be only one
* with a uniq gid and it conflicts with all other lock modes too */
cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
break;
}
}
- cfs_spin_unlock(&head->coh_lock_guard);
- RETURN(lock);
+ spin_unlock(&head->coh_lock_guard);
+ RETURN(lock);
}
EXPORT_SYMBOL(cl_lock_at_pgoff);
LASSERT(ergo(!cancel,
head->coh_tree.rnode == NULL && head->coh_pages == 0));
- cfs_spin_lock(&head->coh_lock_guard);
- while (!cfs_list_empty(&head->coh_locks)) {
- lock = container_of(head->coh_locks.next,
- struct cl_lock, cll_linkage);
- cl_lock_get_trust(lock);
- cfs_spin_unlock(&head->coh_lock_guard);
+ spin_lock(&head->coh_lock_guard);
+ while (!cfs_list_empty(&head->coh_locks)) {
+ lock = container_of(head->coh_locks.next,
+ struct cl_lock, cll_linkage);
+ cl_lock_get_trust(lock);
+ spin_unlock(&head->coh_lock_guard);
lu_ref_add(&lock->cll_reference, "prune", cfs_current());
again:
cl_lock_mutex_put(env, lock);
lu_ref_del(&lock->cll_reference, "prune", cfs_current());
cl_lock_put(env, lock);
- cfs_spin_lock(&head->coh_lock_guard);
- }
- cfs_spin_unlock(&head->coh_lock_guard);
- EXIT;
+ spin_lock(&head->coh_lock_guard);
+ }
+ spin_unlock(&head->coh_lock_guard);
+ EXIT;
}
EXPORT_SYMBOL(cl_locks_prune);
static cfs_mem_cache_t *cl_env_kmem;
/** Lock class of cl_object_header::coh_page_guard */
-static cfs_lock_class_key_t cl_page_guard_class;
+static struct lock_class_key cl_page_guard_class;
/** Lock class of cl_object_header::coh_lock_guard */
-static cfs_lock_class_key_t cl_lock_guard_class;
+static struct lock_class_key cl_lock_guard_class;
/** Lock class of cl_object_header::coh_attr_guard */
-static cfs_lock_class_key_t cl_attr_guard_class;
+static struct lock_class_key cl_attr_guard_class;
extern __u32 lu_context_tags_default;
extern __u32 lu_session_tags_default;
ENTRY;
result = lu_object_header_init(&h->coh_lu);
if (result == 0) {
- cfs_spin_lock_init(&h->coh_page_guard);
- cfs_spin_lock_init(&h->coh_lock_guard);
- cfs_spin_lock_init(&h->coh_attr_guard);
- cfs_lockdep_set_class(&h->coh_page_guard, &cl_page_guard_class);
- cfs_lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
- cfs_lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
+ spin_lock_init(&h->coh_page_guard);
+ spin_lock_init(&h->coh_lock_guard);
+ spin_lock_init(&h->coh_attr_guard);
+ lockdep_set_class(&h->coh_page_guard, &cl_page_guard_class);
+ lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
+ lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
h->coh_pages = 0;
/* XXX hard coded GFP_* mask. */
INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
*
* \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get().
*/
-static cfs_spinlock_t *cl_object_attr_guard(struct cl_object *o)
+static spinlock_t *cl_object_attr_guard(struct cl_object *o)
{
- return &cl_object_header(cl_object_top(o))->coh_attr_guard;
+ return &cl_object_header(cl_object_top(o))->coh_attr_guard;
}
/**
*/
void cl_object_attr_lock(struct cl_object *o)
{
- cfs_spin_lock(cl_object_attr_guard(o));
+ spin_lock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_lock);
*/
void cl_object_attr_unlock(struct cl_object *o)
{
- cfs_spin_unlock(cl_object_attr_guard(o));
+ spin_unlock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_unlock);
LASSERT(hdr->coh_tree.rnode == NULL);
LASSERT(hdr->coh_pages == 0);
- cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
+ set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
/*
* Destroy all locks. Object destruction (including cl_inode_fini())
* cannot cancel the locks, because in the case of a local client,
*/
int cl_object_has_locks(struct cl_object *obj)
{
- struct cl_object_header *head = cl_object_header(obj);
- int has;
+ struct cl_object_header *head = cl_object_header(obj);
+ int has;
- cfs_spin_lock(&head->coh_lock_guard);
- has = cfs_list_empty(&head->coh_locks);
- cfs_spin_unlock(&head->coh_lock_guard);
+ spin_lock(&head->coh_lock_guard);
+ has = cfs_list_empty(&head->coh_locks);
+ spin_unlock(&head->coh_lock_guard);
- return (has == 0);
+ return (has == 0);
}
EXPORT_SYMBOL(cl_object_has_locks);
static struct lu_env *cl_env_obtain(void *debug)
{
- struct cl_env *cle;
- struct lu_env *env;
+ struct cl_env *cle;
+ struct lu_env *env;
- ENTRY;
- cfs_spin_lock(&cl_envs_guard);
- LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
- if (cl_envs_cached_nr > 0) {
- int rc;
+ ENTRY;
+ spin_lock(&cl_envs_guard);
+ LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
+ if (cl_envs_cached_nr > 0) {
+ int rc;
- cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
- cfs_list_del_init(&cle->ce_linkage);
- cl_envs_cached_nr--;
- cfs_spin_unlock(&cl_envs_guard);
+ cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
+ cfs_list_del_init(&cle->ce_linkage);
+ cl_envs_cached_nr--;
+ spin_unlock(&cl_envs_guard);
env = &cle->ce_lu;
rc = lu_env_refill(env);
env = ERR_PTR(rc);
}
} else {
- cfs_spin_unlock(&cl_envs_guard);
- env = cl_env_new(lu_context_tags_default,
- lu_session_tags_default, debug);
- }
- RETURN(env);
+ spin_unlock(&cl_envs_guard);
+ env = cl_env_new(lu_context_tags_default,
+ lu_session_tags_default, debug);
+ }
+ RETURN(env);
}
static inline struct cl_env *cl_env_container(struct lu_env *env)
*/
unsigned cl_env_cache_purge(unsigned nr)
{
- struct cl_env *cle;
-
- ENTRY;
- cfs_spin_lock(&cl_envs_guard);
- for (; !cfs_list_empty(&cl_envs) && nr > 0; --nr) {
- cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
- cfs_list_del_init(&cle->ce_linkage);
- LASSERT(cl_envs_cached_nr > 0);
- cl_envs_cached_nr--;
- cfs_spin_unlock(&cl_envs_guard);
-
- cl_env_fini(cle);
- cfs_spin_lock(&cl_envs_guard);
- }
- LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
- cfs_spin_unlock(&cl_envs_guard);
- RETURN(nr);
+ struct cl_env *cle;
+
+ ENTRY;
+ spin_lock(&cl_envs_guard);
+ for (; !cfs_list_empty(&cl_envs) && nr > 0; --nr) {
+ cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
+ cfs_list_del_init(&cle->ce_linkage);
+ LASSERT(cl_envs_cached_nr > 0);
+ cl_envs_cached_nr--;
+ spin_unlock(&cl_envs_guard);
+
+ cl_env_fini(cle);
+ spin_lock(&cl_envs_guard);
+ }
+ LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
+ spin_unlock(&cl_envs_guard);
+ RETURN(nr);
}
EXPORT_SYMBOL(cl_env_cache_purge);
if (cl_envs_cached_nr < cl_envs_cached_max &&
(env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
(env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
- cfs_spin_lock(&cl_envs_guard);
- cfs_list_add(&cle->ce_linkage, &cl_envs);
- cl_envs_cached_nr++;
- cfs_spin_unlock(&cl_envs_guard);
- } else
- cl_env_fini(cle);
- }
+ spin_lock(&cl_envs_guard);
+ cfs_list_add(&cle->ce_linkage, &cl_envs);
+ cl_envs_cached_nr++;
+ spin_unlock(&cl_envs_guard);
+ } else
+ cl_env_fini(cle);
+ }
}
EXPORT_SYMBOL(cl_env_put);
hdr = cl_object_header(obj);
pvec = cl_env_info(env)->clt_pvec;
dtype = cl_object_top(obj)->co_lu.lo_dev->ld_type;
- cfs_spin_lock(&hdr->coh_page_guard);
+ spin_lock(&hdr->coh_page_guard);
while ((nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
idx, CLT_PVEC_SIZE)) > 0) {
int end_of_region = 0;
* check that pages weren't truncated (cl_page_own() returns
* error in the latter case).
*/
- cfs_spin_unlock(&hdr->coh_page_guard);
+ spin_unlock(&hdr->coh_page_guard);
tree_lock = 0;
for (i = 0; i < j; ++i) {
if (res != CLP_GANG_OKAY)
break;
- cfs_spin_lock(&hdr->coh_page_guard);
- tree_lock = 1;
- }
- if (tree_lock)
- cfs_spin_unlock(&hdr->coh_page_guard);
- RETURN(res);
+ spin_lock(&hdr->coh_page_guard);
+ tree_lock = 1;
+ }
+ if (tree_lock)
+ spin_unlock(&hdr->coh_page_guard);
+ RETURN(res);
}
EXPORT_SYMBOL(cl_page_gang_lookup);
"cl_page", page);
page->cp_index = ind;
cl_page_state_set_trust(page, CPS_CACHED);
- cfs_spin_lock_init(&page->cp_lock);
- page->cp_type = type;
- CFS_INIT_LIST_HEAD(&page->cp_layers);
- CFS_INIT_LIST_HEAD(&page->cp_batch);
- CFS_INIT_LIST_HEAD(&page->cp_flight);
- cfs_mutex_init(&page->cp_mutex);
+ spin_lock_init(&page->cp_lock);
+ page->cp_type = type;
+ CFS_INIT_LIST_HEAD(&page->cp_layers);
+ CFS_INIT_LIST_HEAD(&page->cp_batch);
+ CFS_INIT_LIST_HEAD(&page->cp_flight);
+ mutex_init(&page->cp_mutex);
lu_ref_init(&page->cp_reference);
head = o->co_lu.lo_header;
cfs_list_for_each_entry(o, &head->loh_layers,
* XXX optimization: use radix_tree_preload() here, and change tree
* gfp mask to GFP_KERNEL in cl_object_header_init().
*/
- cfs_spin_lock(&hdr->coh_page_guard);
+ spin_lock(&hdr->coh_page_guard);
err = radix_tree_insert(&hdr->coh_tree, idx, page);
if (err != 0) {
ghost = page;
}
hdr->coh_pages++;
}
- cfs_spin_unlock(&hdr->coh_page_guard);
+ spin_unlock(&hdr->coh_page_guard);
if (unlikely(ghost != NULL)) {
cfs_atomic_dec(&site->cs_pages.cs_busy);
* inside the cp_lock. So that if it gets here,
* it is the REALLY last reference to this page.
*/
- cfs_spin_unlock(&page->cp_lock);
+ spin_unlock(&page->cp_lock);
LASSERT(cfs_atomic_read(&page->cp_ref) == 0);
PASSERT(env, page, page->cp_owner == NULL);
EXIT;
return;
}
- cfs_spin_unlock(&page->cp_lock);
- }
+ spin_unlock(&page->cp_lock);
+ }
- EXIT;
+ EXIT;
}
EXPORT_SYMBOL(cl_page_put);
if (top == NULL)
RETURN(NULL);
- cfs_spin_lock(&top->cp_lock);
- for (page = top; page != NULL; page = page->cp_child) {
- if (cl_object_same(page->cp_obj, obj)) {
- cl_page_get_trust(page);
- break;
- }
- }
- cfs_spin_unlock(&top->cp_lock);
- LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
- RETURN(page);
+ spin_lock(&top->cp_lock);
+ for (page = top; page != NULL; page = page->cp_child) {
+ if (cl_object_same(page->cp_obj, obj)) {
+ cl_page_get_trust(page);
+ break;
+ }
+ }
+ spin_unlock(&top->cp_lock);
+ LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
+ RETURN(page);
}
EXPORT_SYMBOL(cl_vmpage_page);
struct cl_object_header *hdr;
hdr = cl_object_header(tmp->cp_obj);
- cfs_spin_lock(&hdr->coh_page_guard);
- value = radix_tree_delete(&hdr->coh_tree,
- tmp->cp_index);
- PASSERT(env, tmp, value == tmp);
- PASSERT(env, tmp, hdr->coh_pages > 0);
- hdr->coh_pages--;
- cfs_spin_unlock(&hdr->coh_page_guard);
+ spin_lock(&hdr->coh_page_guard);
+ value = radix_tree_delete(&hdr->coh_tree,
+ tmp->cp_index);
+ PASSERT(env, tmp, value == tmp);
+ PASSERT(env, tmp, hdr->coh_pages > 0);
+ hdr->coh_pages--;
+ spin_unlock(&hdr->coh_page_guard);
}
}
#define obd_init_checks() do {} while(0)
#endif
-extern cfs_spinlock_t obd_types_lock;
+extern spinlock_t obd_types_lock;
extern int class_procfs_init(void);
extern int class_procfs_clean(void);
LCONSOLE_INFO("Lustre: Build Version: "BUILD_VERSION"\n");
- cfs_spin_lock_init(&obd_types_lock);
+ spin_lock_init(&obd_types_lock);
obd_zombie_impexp_init();
#ifdef LPROCFS
obd_memory = lprocfs_alloc_stats(OBD_STATS_NUM,
#include <lprocfs_status.h>
extern cfs_list_t obd_types;
-cfs_spinlock_t obd_types_lock;
+spinlock_t obd_types_lock;
cfs_mem_cache_t *obd_device_cachep;
cfs_mem_cache_t *obdo_cachep;
cfs_list_t obd_zombie_imports;
cfs_list_t obd_zombie_exports;
-cfs_spinlock_t obd_zombie_impexp_lock;
+spinlock_t obd_zombie_impexp_lock;
static void obd_zombie_impexp_notify(void);
static void obd_zombie_export_add(struct obd_export *exp);
static void obd_zombie_import_add(struct obd_import *imp);
struct obd_type *class_search_type(const char *name)
{
- cfs_list_t *tmp;
- struct obd_type *type;
+ cfs_list_t *tmp;
+ struct obd_type *type;
- cfs_spin_lock(&obd_types_lock);
- cfs_list_for_each(tmp, &obd_types) {
- type = cfs_list_entry(tmp, struct obd_type, typ_chain);
- if (strcmp(type->typ_name, name) == 0) {
- cfs_spin_unlock(&obd_types_lock);
- return type;
- }
- }
- cfs_spin_unlock(&obd_types_lock);
- return NULL;
+ spin_lock(&obd_types_lock);
+ cfs_list_for_each(tmp, &obd_types) {
+ type = cfs_list_entry(tmp, struct obd_type, typ_chain);
+ if (strcmp(type->typ_name, name) == 0) {
+ spin_unlock(&obd_types_lock);
+ return type;
+ }
+ }
+ spin_unlock(&obd_types_lock);
+ return NULL;
}
EXPORT_SYMBOL(class_search_type);
}
#endif
if (type) {
- cfs_spin_lock(&type->obd_type_lock);
- type->typ_refcnt++;
- cfs_try_module_get(type->typ_dt_ops->o_owner);
- cfs_spin_unlock(&type->obd_type_lock);
- }
- return type;
+ spin_lock(&type->obd_type_lock);
+ type->typ_refcnt++;
+ cfs_try_module_get(type->typ_dt_ops->o_owner);
+ spin_unlock(&type->obd_type_lock);
+ }
+ return type;
}
EXPORT_SYMBOL(class_get_type);
void class_put_type(struct obd_type *type)
{
- LASSERT(type);
- cfs_spin_lock(&type->obd_type_lock);
- type->typ_refcnt--;
- cfs_module_put(type->typ_dt_ops->o_owner);
- cfs_spin_unlock(&type->obd_type_lock);
+ LASSERT(type);
+ spin_lock(&type->obd_type_lock);
+ type->typ_refcnt--;
+ cfs_module_put(type->typ_dt_ops->o_owner);
+ spin_unlock(&type->obd_type_lock);
}
EXPORT_SYMBOL(class_put_type);
if (md_ops)
*(type->typ_md_ops) = *md_ops;
strcpy(type->typ_name, name);
- cfs_spin_lock_init(&type->obd_type_lock);
+ spin_lock_init(&type->obd_type_lock);
#ifdef LPROCFS
type->typ_procroot = lprocfs_register(type->typ_name, proc_lustre_root,
GOTO (failed, rc);
}
- cfs_spin_lock(&obd_types_lock);
- cfs_list_add(&type->typ_chain, &obd_types);
- cfs_spin_unlock(&obd_types_lock);
+ spin_lock(&obd_types_lock);
+ cfs_list_add(&type->typ_chain, &obd_types);
+ spin_unlock(&obd_types_lock);
RETURN (0);
if (type->typ_lu)
lu_device_type_fini(type->typ_lu);
- cfs_spin_lock(&obd_types_lock);
- cfs_list_del(&type->typ_chain);
- cfs_spin_unlock(&obd_types_lock);
+ spin_lock(&obd_types_lock);
+ cfs_list_del(&type->typ_chain);
+ spin_unlock(&obd_types_lock);
OBD_FREE(type->typ_name, strlen(name) + 1);
if (type->typ_dt_ops != NULL)
OBD_FREE_PTR(type->typ_dt_ops);
LASSERT(newdev->obd_magic == OBD_DEVICE_MAGIC);
- cfs_write_lock(&obd_dev_lock);
+ write_lock(&obd_dev_lock);
for (i = 0; i < class_devno_max(); i++) {
struct obd_device *obd = class_num2obd(i);
obd_devs[i] = result;
}
}
- cfs_write_unlock(&obd_dev_lock);
+ write_unlock(&obd_dev_lock);
if (result == NULL && i >= class_devno_max()) {
CERROR("all %u OBD devices used, increase MAX_OBD_DEVICES\n",
CDEBUG(D_INFO, "Release obd device %s at %d obd_type name =%s\n",
obd->obd_name, obd->obd_minor, obd->obd_type->typ_name);
- cfs_write_lock(&obd_dev_lock);
+ write_lock(&obd_dev_lock);
obd_devs[obd->obd_minor] = NULL;
- cfs_write_unlock(&obd_dev_lock);
+ write_unlock(&obd_dev_lock);
obd_device_free(obd);
class_put_type(obd_type);
if (!name)
return -1;
- cfs_read_lock(&obd_dev_lock);
+ read_lock(&obd_dev_lock);
for (i = 0; i < class_devno_max(); i++) {
struct obd_device *obd = class_num2obd(i);
out any references */
LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
if (obd->obd_attached) {
- cfs_read_unlock(&obd_dev_lock);
+ read_unlock(&obd_dev_lock);
return i;
}
break;
}
}
- cfs_read_unlock(&obd_dev_lock);
+ read_unlock(&obd_dev_lock);
return -1;
}
{
int i;
- cfs_read_lock(&obd_dev_lock);
+ read_lock(&obd_dev_lock);
for (i = 0; i < class_devno_max(); i++) {
struct obd_device *obd = class_num2obd(i);
if (obd && obd_uuid_equals(uuid, &obd->obd_uuid)) {
LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
- cfs_read_unlock(&obd_dev_lock);
+ read_unlock(&obd_dev_lock);
return i;
}
}
- cfs_read_unlock(&obd_dev_lock);
+ read_unlock(&obd_dev_lock);
return -1;
}
char *status;
int i;
- cfs_read_lock(&obd_dev_lock);
+ read_lock(&obd_dev_lock);
for (i = 0; i < class_devno_max(); i++) {
struct obd_device *obd = class_num2obd(i);
obd->obd_name, obd->obd_uuid.uuid,
cfs_atomic_read(&obd->obd_refcount));
}
- cfs_read_unlock(&obd_dev_lock);
+ read_unlock(&obd_dev_lock);
return;
}
{
int i;
- cfs_read_lock(&obd_dev_lock);
+ read_lock(&obd_dev_lock);
for (i = 0; i < class_devno_max(); i++) {
struct obd_device *obd = class_num2obd(i);
&obd->u.cli.cl_target_uuid) &&
((grp_uuid)? obd_uuid_equals(grp_uuid,
&obd->obd_uuid) : 1)) {
- cfs_read_unlock(&obd_dev_lock);
+ read_unlock(&obd_dev_lock);
return obd;
}
}
}
- cfs_read_unlock(&obd_dev_lock);
+ read_unlock(&obd_dev_lock);
return NULL;
}
else
return NULL;
- cfs_read_lock(&obd_dev_lock);
+ read_lock(&obd_dev_lock);
for (; i < class_devno_max(); i++) {
struct obd_device *obd = class_num2obd(i);
if (obd_uuid_equals(grp_uuid, &obd->obd_uuid)) {
if (next != NULL)
*next = i+1;
- cfs_read_unlock(&obd_dev_lock);
+ read_unlock(&obd_dev_lock);
return obd;
}
}
- cfs_read_unlock(&obd_dev_lock);
+ read_unlock(&obd_dev_lock);
return NULL;
}
LASSERT(namelen > 0);
- cfs_read_lock(&obd_dev_lock);
+ read_lock(&obd_dev_lock);
for (i = 0; i < class_devno_max(); i++) {
obd = class_num2obd(i);
continue;
class_incref(obd, __FUNCTION__, obd);
- cfs_read_unlock(&obd_dev_lock);
+ read_unlock(&obd_dev_lock);
rc2 = obd_set_info_async(NULL, obd->obd_self_export,
sizeof(KEY_SPTLRPC_CONF),
KEY_SPTLRPC_CONF, 0, NULL, NULL);
rc = rc ? rc : rc2;
class_decref(obd, __FUNCTION__, obd);
- cfs_read_lock(&obd_dev_lock);
+ read_lock(&obd_dev_lock);
}
- cfs_read_unlock(&obd_dev_lock);
+ read_unlock(&obd_dev_lock);
return rc;
}
EXPORT_SYMBOL(class_notify_sptlrpc_conf);
cfs_atomic_set(&export->exp_locks_count, 0);
#if LUSTRE_TRACKS_LOCK_EXP_REFS
CFS_INIT_LIST_HEAD(&export->exp_locks_list);
- cfs_spin_lock_init(&export->exp_locks_list_guard);
+ spin_lock_init(&export->exp_locks_list_guard);
#endif
- cfs_atomic_set(&export->exp_replay_count, 0);
- export->exp_obd = obd;
- CFS_INIT_LIST_HEAD(&export->exp_outstanding_replies);
- cfs_spin_lock_init(&export->exp_uncommitted_replies_lock);
- CFS_INIT_LIST_HEAD(&export->exp_uncommitted_replies);
- CFS_INIT_LIST_HEAD(&export->exp_req_replay_queue);
- CFS_INIT_LIST_HEAD(&export->exp_handle.h_link);
- CFS_INIT_LIST_HEAD(&export->exp_hp_rpcs);
+ cfs_atomic_set(&export->exp_replay_count, 0);
+ export->exp_obd = obd;
+ CFS_INIT_LIST_HEAD(&export->exp_outstanding_replies);
+ spin_lock_init(&export->exp_uncommitted_replies_lock);
+ CFS_INIT_LIST_HEAD(&export->exp_uncommitted_replies);
+ CFS_INIT_LIST_HEAD(&export->exp_req_replay_queue);
+ CFS_INIT_LIST_HEAD(&export->exp_handle.h_link);
+ CFS_INIT_LIST_HEAD(&export->exp_hp_rpcs);
class_handle_hash(&export->exp_handle, &export_handle_ops);
export->exp_last_request_time = cfs_time_current_sec();
- cfs_spin_lock_init(&export->exp_lock);
- cfs_spin_lock_init(&export->exp_rpc_lock);
- CFS_INIT_HLIST_NODE(&export->exp_uuid_hash);
- CFS_INIT_HLIST_NODE(&export->exp_nid_hash);
- cfs_spin_lock_init(&export->exp_bl_list_lock);
- CFS_INIT_LIST_HEAD(&export->exp_bl_list);
-
- export->exp_sp_peer = LUSTRE_SP_ANY;
- export->exp_flvr.sf_rpc = SPTLRPC_FLVR_INVALID;
- export->exp_client_uuid = *cluuid;
- obd_init_export(export);
-
- cfs_spin_lock(&obd->obd_dev_lock);
- /* shouldn't happen, but might race */
- if (obd->obd_stopping)
- GOTO(exit_unlock, rc = -ENODEV);
-
- hash = cfs_hash_getref(obd->obd_uuid_hash);
- if (hash == NULL)
- GOTO(exit_unlock, rc = -ENODEV);
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_lock_init(&export->exp_lock);
+ spin_lock_init(&export->exp_rpc_lock);
+ CFS_INIT_HLIST_NODE(&export->exp_uuid_hash);
+ CFS_INIT_HLIST_NODE(&export->exp_nid_hash);
+ spin_lock_init(&export->exp_bl_list_lock);
+ CFS_INIT_LIST_HEAD(&export->exp_bl_list);
+
+ export->exp_sp_peer = LUSTRE_SP_ANY;
+ export->exp_flvr.sf_rpc = SPTLRPC_FLVR_INVALID;
+ export->exp_client_uuid = *cluuid;
+ obd_init_export(export);
+
+ spin_lock(&obd->obd_dev_lock);
+ /* shouldn't happen, but might race */
+ if (obd->obd_stopping)
+ GOTO(exit_unlock, rc = -ENODEV);
+
+ hash = cfs_hash_getref(obd->obd_uuid_hash);
+ if (hash == NULL)
+ GOTO(exit_unlock, rc = -ENODEV);
+ spin_unlock(&obd->obd_dev_lock);
if (!obd_uuid_equals(cluuid, &obd->obd_uuid)) {
rc = cfs_hash_add_unique(hash, cluuid, &export->exp_uuid_hash);
}
}
- cfs_spin_lock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_dev_lock);
if (obd->obd_stopping) {
cfs_hash_del(hash, cluuid, &export->exp_uuid_hash);
GOTO(exit_unlock, rc = -ENODEV);
cfs_list_add_tail(&export->exp_obd_chain_timed,
&export->exp_obd->obd_exports_timed);
export->exp_obd->obd_num_exports++;
- cfs_spin_unlock(&obd->obd_dev_lock);
- cfs_hash_putref(hash);
- RETURN(export);
+ spin_unlock(&obd->obd_dev_lock);
+ cfs_hash_putref(hash);
+ RETURN(export);
exit_unlock:
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_unlock(&obd->obd_dev_lock);
exit_err:
if (hash)
cfs_hash_putref(hash);
void class_unlink_export(struct obd_export *exp)
{
- class_handle_unhash(&exp->exp_handle);
+ class_handle_unhash(&exp->exp_handle);
- cfs_spin_lock(&exp->exp_obd->obd_dev_lock);
- /* delete an uuid-export hashitem from hashtables */
- if (!cfs_hlist_unhashed(&exp->exp_uuid_hash))
- cfs_hash_del(exp->exp_obd->obd_uuid_hash,
- &exp->exp_client_uuid,
- &exp->exp_uuid_hash);
+ spin_lock(&exp->exp_obd->obd_dev_lock);
+ /* delete an uuid-export hashitem from hashtables */
+ if (!cfs_hlist_unhashed(&exp->exp_uuid_hash))
+ cfs_hash_del(exp->exp_obd->obd_uuid_hash,
+ &exp->exp_client_uuid,
+ &exp->exp_uuid_hash);
- cfs_list_move(&exp->exp_obd_chain, &exp->exp_obd->obd_unlinked_exports);
- cfs_list_del_init(&exp->exp_obd_chain_timed);
- exp->exp_obd->obd_num_exports--;
- cfs_spin_unlock(&exp->exp_obd->obd_dev_lock);
- class_export_put(exp);
+ cfs_list_move(&exp->exp_obd_chain, &exp->exp_obd->obd_unlinked_exports);
+ cfs_list_del_init(&exp->exp_obd_chain_timed);
+ exp->exp_obd->obd_num_exports--;
+ spin_unlock(&exp->exp_obd->obd_dev_lock);
+ class_export_put(exp);
}
EXPORT_SYMBOL(class_unlink_export);
CFS_INIT_LIST_HEAD(&imp->imp_replay_list);
CFS_INIT_LIST_HEAD(&imp->imp_sending_list);
CFS_INIT_LIST_HEAD(&imp->imp_delayed_list);
- cfs_spin_lock_init(&imp->imp_lock);
- imp->imp_last_success_conn = 0;
- imp->imp_state = LUSTRE_IMP_NEW;
- imp->imp_obd = class_incref(obd, "import", imp);
- cfs_mutex_init(&imp->imp_sec_mutex);
+ spin_lock_init(&imp->imp_lock);
+ imp->imp_last_success_conn = 0;
+ imp->imp_state = LUSTRE_IMP_NEW;
+ imp->imp_obd = class_incref(obd, "import", imp);
+ mutex_init(&imp->imp_sec_mutex);
cfs_waitq_init(&imp->imp_recovery_waitq);
cfs_atomic_set(&imp->imp_refcount, 2);
void class_destroy_import(struct obd_import *import)
{
- LASSERT(import != NULL);
- LASSERT(import != LP_POISON);
+ LASSERT(import != NULL);
+ LASSERT(import != LP_POISON);
- class_handle_unhash(&import->imp_handle);
+ class_handle_unhash(&import->imp_handle);
- cfs_spin_lock(&import->imp_lock);
- import->imp_generation++;
- cfs_spin_unlock(&import->imp_lock);
- class_import_put(import);
+ spin_lock(&import->imp_lock);
+ import->imp_generation++;
+ spin_unlock(&import->imp_lock);
+ class_import_put(import);
}
EXPORT_SYMBOL(class_destroy_import);
void __class_export_add_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
{
- cfs_spin_lock(&exp->exp_locks_list_guard);
+ spin_lock(&exp->exp_locks_list_guard);
LASSERT(lock->l_exp_refs_nr >= 0);
}
CDEBUG(D_INFO, "lock = %p, export = %p, refs = %u\n",
lock, exp, lock->l_exp_refs_nr);
- cfs_spin_unlock(&exp->exp_locks_list_guard);
+ spin_unlock(&exp->exp_locks_list_guard);
}
EXPORT_SYMBOL(__class_export_add_lock_ref);
void __class_export_del_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
{
- cfs_spin_lock(&exp->exp_locks_list_guard);
+ spin_lock(&exp->exp_locks_list_guard);
LASSERT(lock->l_exp_refs_nr > 0);
if (lock->l_exp_refs_target != exp) {
LCONSOLE_WARN("lock %p, "
}
CDEBUG(D_INFO, "lock = %p, export = %p, refs = %u\n",
lock, exp, lock->l_exp_refs_nr);
- cfs_spin_unlock(&exp->exp_locks_list_guard);
+ spin_unlock(&exp->exp_locks_list_guard);
}
EXPORT_SYMBOL(__class_export_del_lock_ref);
#endif
/* if export is involved in recovery then clean up related things */
void class_export_recovery_cleanup(struct obd_export *exp)
{
- struct obd_device *obd = exp->exp_obd;
-
- cfs_spin_lock(&obd->obd_recovery_task_lock);
- if (exp->exp_delayed)
- obd->obd_delayed_clients--;
- if (obd->obd_recovering && exp->exp_in_recovery) {
- cfs_spin_lock(&exp->exp_lock);
- exp->exp_in_recovery = 0;
- cfs_spin_unlock(&exp->exp_lock);
- LASSERT_ATOMIC_POS(&obd->obd_connected_clients);
- cfs_atomic_dec(&obd->obd_connected_clients);
- }
- cfs_spin_unlock(&obd->obd_recovery_task_lock);
- /** Cleanup req replay fields */
- if (exp->exp_req_replay_needed) {
- cfs_spin_lock(&exp->exp_lock);
- exp->exp_req_replay_needed = 0;
- cfs_spin_unlock(&exp->exp_lock);
- LASSERT(cfs_atomic_read(&obd->obd_req_replay_clients));
- cfs_atomic_dec(&obd->obd_req_replay_clients);
- }
- /** Cleanup lock replay data */
- if (exp->exp_lock_replay_needed) {
- cfs_spin_lock(&exp->exp_lock);
- exp->exp_lock_replay_needed = 0;
- cfs_spin_unlock(&exp->exp_lock);
- LASSERT(cfs_atomic_read(&obd->obd_lock_replay_clients));
- cfs_atomic_dec(&obd->obd_lock_replay_clients);
+ struct obd_device *obd = exp->exp_obd;
+
+ spin_lock(&obd->obd_recovery_task_lock);
+ if (exp->exp_delayed)
+ obd->obd_delayed_clients--;
+ if (obd->obd_recovering && exp->exp_in_recovery) {
+ spin_lock(&exp->exp_lock);
+ exp->exp_in_recovery = 0;
+ spin_unlock(&exp->exp_lock);
+ LASSERT_ATOMIC_POS(&obd->obd_connected_clients);
+ cfs_atomic_dec(&obd->obd_connected_clients);
+ }
+ spin_unlock(&obd->obd_recovery_task_lock);
+ /** Cleanup req replay fields */
+ if (exp->exp_req_replay_needed) {
+ spin_lock(&exp->exp_lock);
+ exp->exp_req_replay_needed = 0;
+ spin_unlock(&exp->exp_lock);
+ LASSERT(cfs_atomic_read(&obd->obd_req_replay_clients));
+ cfs_atomic_dec(&obd->obd_req_replay_clients);
}
+ /** Cleanup lock replay data */
+ if (exp->exp_lock_replay_needed) {
+ spin_lock(&exp->exp_lock);
+ exp->exp_lock_replay_needed = 0;
+ spin_unlock(&exp->exp_lock);
+ LASSERT(cfs_atomic_read(&obd->obd_lock_replay_clients));
+ cfs_atomic_dec(&obd->obd_lock_replay_clients);
+ }
}
/* This function removes 1-3 references from the export:
RETURN(-EINVAL);
}
- cfs_spin_lock(&export->exp_lock);
- already_disconnected = export->exp_disconnected;
- export->exp_disconnected = 1;
- cfs_spin_unlock(&export->exp_lock);
+ spin_lock(&export->exp_lock);
+ already_disconnected = export->exp_disconnected;
+ export->exp_disconnected = 1;
+ spin_unlock(&export->exp_lock);
/* class_cleanup(), abort_recovery(), and class_fail_export()
* all end up in here, and if any of them race we shouldn't
/* Return non-zero for a fully connected export */
int class_connected_export(struct obd_export *exp)
{
- if (exp) {
- int connected;
- cfs_spin_lock(&exp->exp_lock);
- connected = (exp->exp_conn_cnt > 0);
- cfs_spin_unlock(&exp->exp_lock);
- return connected;
- }
- return 0;
+ if (exp) {
+ int connected;
+ spin_lock(&exp->exp_lock);
+ connected = (exp->exp_conn_cnt > 0);
+ spin_unlock(&exp->exp_lock);
+ return connected;
+ }
+ return 0;
}
EXPORT_SYMBOL(class_connected_export);
/* needed for a safe CDEBUG call after obd_disconnect */
class_export_get(exp);
- cfs_spin_lock(&exp->exp_lock);
- exp->exp_flags = flags;
- cfs_spin_unlock(&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
+ exp->exp_flags = flags;
+ spin_unlock(&exp->exp_lock);
if (obd_uuid_equals(&exp->exp_client_uuid,
&exp->exp_obd->obd_uuid)) {
void class_disconnect_exports(struct obd_device *obd)
{
- cfs_list_t work_list;
- ENTRY;
+ cfs_list_t work_list;
+ ENTRY;
- /* Move all of the exports from obd_exports to a work list, en masse. */
- CFS_INIT_LIST_HEAD(&work_list);
- cfs_spin_lock(&obd->obd_dev_lock);
- cfs_list_splice_init(&obd->obd_exports, &work_list);
- cfs_list_splice_init(&obd->obd_delayed_exports, &work_list);
- cfs_spin_unlock(&obd->obd_dev_lock);
+ /* Move all of the exports from obd_exports to a work list, en masse. */
+ CFS_INIT_LIST_HEAD(&work_list);
+ spin_lock(&obd->obd_dev_lock);
+ cfs_list_splice_init(&obd->obd_exports, &work_list);
+ cfs_list_splice_init(&obd->obd_delayed_exports, &work_list);
+ spin_unlock(&obd->obd_dev_lock);
if (!cfs_list_empty(&work_list)) {
CDEBUG(D_HA, "OBD device %d (%p) has exports, "
ENTRY;
CFS_INIT_LIST_HEAD(&work_list);
- cfs_spin_lock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_dev_lock);
cfs_list_for_each_entry_safe(exp, n, &obd->obd_exports,
exp_obd_chain) {
/* don't count self-export as client */
if (exp->exp_target_data.ted_lr_idx == -1)
continue;
- cfs_spin_lock(&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
if (test_export(exp)) {
- cfs_spin_unlock(&exp->exp_lock);
+ spin_unlock(&exp->exp_lock);
continue;
}
exp->exp_failed = 1;
- cfs_spin_unlock(&exp->exp_lock);
+ spin_unlock(&exp->exp_lock);
cfs_list_move(&exp->exp_obd_chain, &work_list);
evicted++;
libcfs_nid2str(exp->exp_connection->c_peer.nid));
print_export_data(exp, "EVICTING", 0);
}
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_unlock(&obd->obd_dev_lock);
if (evicted) {
LCONSOLE_WARN("%s: disconnecting %d stale clients\n",
void class_fail_export(struct obd_export *exp)
{
- int rc, already_failed;
+ int rc, already_failed;
- cfs_spin_lock(&exp->exp_lock);
- already_failed = exp->exp_failed;
- exp->exp_failed = 1;
- cfs_spin_unlock(&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
+ already_failed = exp->exp_failed;
+ exp->exp_failed = 1;
+ spin_unlock(&exp->exp_lock);
if (already_failed) {
CDEBUG(D_HA, "disconnecting dead export %p/%s; skipping\n",
int obd_export_evict_by_nid(struct obd_device *obd, const char *nid)
{
cfs_hash_t *nid_hash;
- struct obd_export *doomed_exp = NULL;
- int exports_evicted = 0;
+ struct obd_export *doomed_exp = NULL;
+ int exports_evicted = 0;
- lnet_nid_t nid_key = libcfs_str2nid((char *)nid);
+ lnet_nid_t nid_key = libcfs_str2nid((char *)nid);
- cfs_spin_lock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_dev_lock);
/* umount has run already, so evict thread should leave
* its task to umount thread now */
if (obd->obd_stopping) {
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_unlock(&obd->obd_dev_lock);
return exports_evicted;
}
nid_hash = obd->obd_nid_hash;
cfs_hash_getref(nid_hash);
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_unlock(&obd->obd_dev_lock);
- do {
+ do {
doomed_exp = cfs_hash_lookup(nid_hash, &nid_key);
if (doomed_exp == NULL)
break;
int obd_export_evict_by_uuid(struct obd_device *obd, const char *uuid)
{
cfs_hash_t *uuid_hash;
- struct obd_export *doomed_exp = NULL;
- struct obd_uuid doomed_uuid;
- int exports_evicted = 0;
+ struct obd_export *doomed_exp = NULL;
+ struct obd_uuid doomed_uuid;
+ int exports_evicted = 0;
- cfs_spin_lock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_dev_lock);
if (obd->obd_stopping) {
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_unlock(&obd->obd_dev_lock);
return exports_evicted;
}
uuid_hash = obd->obd_uuid_hash;
cfs_hash_getref(uuid_hash);
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_unlock(&obd->obd_dev_lock);
obd_str2uuid(&doomed_uuid, uuid);
if (obd_uuid_equals(&doomed_uuid, &obd->obd_uuid)) {
#endif
static void print_export_data(struct obd_export *exp, const char *status,
- int locks)
-{
- struct ptlrpc_reply_state *rs;
- struct ptlrpc_reply_state *first_reply = NULL;
- int nreplies = 0;
-
- cfs_spin_lock(&exp->exp_lock);
- cfs_list_for_each_entry(rs, &exp->exp_outstanding_replies,
- rs_exp_list) {
- if (nreplies == 0)
- first_reply = rs;
- nreplies++;
- }
- cfs_spin_unlock(&exp->exp_lock);
+ int locks)
+{
+ struct ptlrpc_reply_state *rs;
+ struct ptlrpc_reply_state *first_reply = NULL;
+ int nreplies = 0;
+
+ spin_lock(&exp->exp_lock);
+ cfs_list_for_each_entry(rs, &exp->exp_outstanding_replies,
+ rs_exp_list) {
+ if (nreplies == 0)
+ first_reply = rs;
+ nreplies++;
+ }
+ spin_unlock(&exp->exp_lock);
CDEBUG(D_HA, "%s: %s %p %s %s %d (%d %d %d) %d %d %d %d: %p %s "LPU64"\n",
exp->exp_obd->obd_name, status, exp, exp->exp_client_uuid.uuid,
{
struct obd_export *exp;
- cfs_spin_lock(&obd->obd_dev_lock);
- cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain)
- print_export_data(exp, "ACTIVE", locks);
- cfs_list_for_each_entry(exp, &obd->obd_unlinked_exports, exp_obd_chain)
- print_export_data(exp, "UNLINKED", locks);
- cfs_list_for_each_entry(exp, &obd->obd_delayed_exports, exp_obd_chain)
- print_export_data(exp, "DELAYED", locks);
- cfs_spin_unlock(&obd->obd_dev_lock);
- cfs_spin_lock(&obd_zombie_impexp_lock);
- cfs_list_for_each_entry(exp, &obd_zombie_exports, exp_obd_chain)
- print_export_data(exp, "ZOMBIE", locks);
- cfs_spin_unlock(&obd_zombie_impexp_lock);
+ spin_lock(&obd->obd_dev_lock);
+ cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain)
+ print_export_data(exp, "ACTIVE", locks);
+ cfs_list_for_each_entry(exp, &obd->obd_unlinked_exports, exp_obd_chain)
+ print_export_data(exp, "UNLINKED", locks);
+ cfs_list_for_each_entry(exp, &obd->obd_delayed_exports, exp_obd_chain)
+ print_export_data(exp, "DELAYED", locks);
+ spin_unlock(&obd->obd_dev_lock);
+ spin_lock(&obd_zombie_impexp_lock);
+ cfs_list_for_each_entry(exp, &obd_zombie_exports, exp_obd_chain)
+ print_export_data(exp, "ZOMBIE", locks);
+ spin_unlock(&obd_zombie_impexp_lock);
}
EXPORT_SYMBOL(dump_exports);
void obd_exports_barrier(struct obd_device *obd)
{
- int waited = 2;
- LASSERT(cfs_list_empty(&obd->obd_exports));
- cfs_spin_lock(&obd->obd_dev_lock);
- while (!cfs_list_empty(&obd->obd_unlinked_exports)) {
- cfs_spin_unlock(&obd->obd_dev_lock);
+ int waited = 2;
+ LASSERT(cfs_list_empty(&obd->obd_exports));
+ spin_lock(&obd->obd_dev_lock);
+ while (!cfs_list_empty(&obd->obd_unlinked_exports)) {
+ spin_unlock(&obd->obd_dev_lock);
cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
cfs_time_seconds(waited));
if (waited > 5 && IS_PO2(waited)) {
dump_exports(obd, 1);
}
waited *= 2;
- cfs_spin_lock(&obd->obd_dev_lock);
- }
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_dev_lock);
+ }
+ spin_unlock(&obd->obd_dev_lock);
}
EXPORT_SYMBOL(obd_exports_barrier);
*/
void obd_zombie_impexp_cull(void)
{
- struct obd_import *import;
- struct obd_export *export;
- ENTRY;
+ struct obd_import *import;
+ struct obd_export *export;
+ ENTRY;
- do {
- cfs_spin_lock(&obd_zombie_impexp_lock);
+ do {
+ spin_lock(&obd_zombie_impexp_lock);
import = NULL;
if (!cfs_list_empty(&obd_zombie_imports)) {
cfs_list_del_init(&export->exp_obd_chain);
}
- cfs_spin_unlock(&obd_zombie_impexp_lock);
+ spin_unlock(&obd_zombie_impexp_lock);
- if (import != NULL) {
- class_import_destroy(import);
- cfs_spin_lock(&obd_zombie_impexp_lock);
- zombies_count--;
- cfs_spin_unlock(&obd_zombie_impexp_lock);
- }
+ if (import != NULL) {
+ class_import_destroy(import);
+ spin_lock(&obd_zombie_impexp_lock);
+ zombies_count--;
+ spin_unlock(&obd_zombie_impexp_lock);
+ }
- if (export != NULL) {
- class_export_destroy(export);
- cfs_spin_lock(&obd_zombie_impexp_lock);
- zombies_count--;
- cfs_spin_unlock(&obd_zombie_impexp_lock);
- }
+ if (export != NULL) {
+ class_export_destroy(export);
+ spin_lock(&obd_zombie_impexp_lock);
+ zombies_count--;
+ spin_unlock(&obd_zombie_impexp_lock);
+ }
- cfs_cond_resched();
- } while (import != NULL || export != NULL);
- EXIT;
+ cfs_cond_resched();
+ } while (import != NULL || export != NULL);
+ EXIT;
}
-static cfs_completion_t obd_zombie_start;
-static cfs_completion_t obd_zombie_stop;
-static unsigned long obd_zombie_flags;
-static cfs_waitq_t obd_zombie_waitq;
-static pid_t obd_zombie_pid;
+static struct completion obd_zombie_start;
+static struct completion obd_zombie_stop;
+static unsigned long obd_zombie_flags;
+static cfs_waitq_t obd_zombie_waitq;
+static pid_t obd_zombie_pid;
enum {
- OBD_ZOMBIE_STOP = 1 << 1
+ OBD_ZOMBIE_STOP = 0x0001,
};
/**
*/
static int obd_zombie_impexp_check(void *arg)
{
- int rc;
+ int rc;
- cfs_spin_lock(&obd_zombie_impexp_lock);
- rc = (zombies_count == 0) &&
- !cfs_test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags);
- cfs_spin_unlock(&obd_zombie_impexp_lock);
+ spin_lock(&obd_zombie_impexp_lock);
+ rc = (zombies_count == 0) &&
+ !test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags);
+ spin_unlock(&obd_zombie_impexp_lock);
- RETURN(rc);
+ RETURN(rc);
}
/**
 * Add export to the obd_zombie thread and notify it.
*/
static void obd_zombie_export_add(struct obd_export *exp) {
- cfs_spin_lock(&exp->exp_obd->obd_dev_lock);
- LASSERT(!cfs_list_empty(&exp->exp_obd_chain));
- cfs_list_del_init(&exp->exp_obd_chain);
- cfs_spin_unlock(&exp->exp_obd->obd_dev_lock);
- cfs_spin_lock(&obd_zombie_impexp_lock);
- zombies_count++;
- cfs_list_add(&exp->exp_obd_chain, &obd_zombie_exports);
- cfs_spin_unlock(&obd_zombie_impexp_lock);
+ spin_lock(&exp->exp_obd->obd_dev_lock);
+ LASSERT(!cfs_list_empty(&exp->exp_obd_chain));
+ cfs_list_del_init(&exp->exp_obd_chain);
+ spin_unlock(&exp->exp_obd->obd_dev_lock);
+ spin_lock(&obd_zombie_impexp_lock);
+ zombies_count++;
+ cfs_list_add(&exp->exp_obd_chain, &obd_zombie_exports);
+ spin_unlock(&obd_zombie_impexp_lock);
- obd_zombie_impexp_notify();
+ obd_zombie_impexp_notify();
}
/**
 * Add import to the obd_zombie thread and notify it.
*/
static void obd_zombie_import_add(struct obd_import *imp) {
- LASSERT(imp->imp_sec == NULL);
- LASSERT(imp->imp_rq_pool == NULL);
- cfs_spin_lock(&obd_zombie_impexp_lock);
- LASSERT(cfs_list_empty(&imp->imp_zombie_chain));
- zombies_count++;
- cfs_list_add(&imp->imp_zombie_chain, &obd_zombie_imports);
- cfs_spin_unlock(&obd_zombie_impexp_lock);
+ LASSERT(imp->imp_sec == NULL);
+ LASSERT(imp->imp_rq_pool == NULL);
+ spin_lock(&obd_zombie_impexp_lock);
+ LASSERT(cfs_list_empty(&imp->imp_zombie_chain));
+ zombies_count++;
+ cfs_list_add(&imp->imp_zombie_chain, &obd_zombie_imports);
+ spin_unlock(&obd_zombie_impexp_lock);
- obd_zombie_impexp_notify();
+ obd_zombie_impexp_notify();
}
/**
*/
static int obd_zombie_is_idle(void)
{
- int rc;
+ int rc;
- LASSERT(!cfs_test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags));
- cfs_spin_lock(&obd_zombie_impexp_lock);
- rc = (zombies_count == 0);
- cfs_spin_unlock(&obd_zombie_impexp_lock);
- return rc;
+ LASSERT(!test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags));
+ spin_lock(&obd_zombie_impexp_lock);
+ rc = (zombies_count == 0);
+ spin_unlock(&obd_zombie_impexp_lock);
+ return rc;
}
/**
*/
static int obd_zombie_impexp_thread(void *unused)
{
- int rc;
+ int rc;
- if ((rc = cfs_daemonize_ctxt("obd_zombid"))) {
- cfs_complete(&obd_zombie_start);
- RETURN(rc);
- }
+ rc = cfs_daemonize_ctxt("obd_zombid");
+ if (rc != 0) {
+ complete(&obd_zombie_start);
+ RETURN(rc);
+ }
- cfs_complete(&obd_zombie_start);
+ complete(&obd_zombie_start);
- obd_zombie_pid = cfs_curproc_pid();
+ obd_zombie_pid = cfs_curproc_pid();
- while(!cfs_test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags)) {
+ while (!test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags)) {
struct l_wait_info lwi = { 0 };
l_wait_event(obd_zombie_waitq,
cfs_waitq_signal(&obd_zombie_waitq);
}
- cfs_complete(&obd_zombie_stop);
+ complete(&obd_zombie_stop);
- RETURN(0);
+ RETURN(0);
}
#else /* ! KERNEL */
*/
int obd_zombie_impexp_init(void)
{
- int rc;
+ int rc;
- CFS_INIT_LIST_HEAD(&obd_zombie_imports);
- CFS_INIT_LIST_HEAD(&obd_zombie_exports);
- cfs_spin_lock_init(&obd_zombie_impexp_lock);
- cfs_init_completion(&obd_zombie_start);
- cfs_init_completion(&obd_zombie_stop);
- cfs_waitq_init(&obd_zombie_waitq);
- obd_zombie_pid = 0;
+ CFS_INIT_LIST_HEAD(&obd_zombie_imports);
+ CFS_INIT_LIST_HEAD(&obd_zombie_exports);
+ spin_lock_init(&obd_zombie_impexp_lock);
+ init_completion(&obd_zombie_start);
+ init_completion(&obd_zombie_stop);
+ cfs_waitq_init(&obd_zombie_waitq);
+ obd_zombie_pid = 0;
#ifdef __KERNEL__
- rc = cfs_create_thread(obd_zombie_impexp_thread, NULL, 0);
- if (rc < 0)
- RETURN(rc);
+ rc = cfs_create_thread(obd_zombie_impexp_thread, NULL, 0);
+ if (rc < 0)
+ RETURN(rc);
- cfs_wait_for_completion(&obd_zombie_start);
+ wait_for_completion(&obd_zombie_start);
#else
obd_zombie_impexp_work_cb =
*/
void obd_zombie_impexp_stop(void)
{
- cfs_set_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags);
+ set_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags);
obd_zombie_impexp_notify();
#ifdef __KERNEL__
- cfs_wait_for_completion(&obd_zombie_stop);
+ wait_for_completion(&obd_zombie_stop);
#else
liblustre_deregister_wait_callback(obd_zombie_impexp_work_cb);
liblustre_deregister_idle_callback(obd_zombie_impexp_idle_cb);
}
int lustre_idmap_add(struct lustre_idmap_table *t,
- uid_t ruid, uid_t luid,
- gid_t rgid, gid_t lgid)
+ uid_t ruid, uid_t luid,
+ gid_t rgid, gid_t lgid)
{
- struct lustre_idmap_entry *e0, *e1;
+ struct lustre_idmap_entry *e0, *e1;
- LASSERT(t);
+ LASSERT(t);
- cfs_spin_lock(&t->lit_lock);
- e0 = idmap_search_entry(t, ruid, luid, rgid, lgid);
- cfs_spin_unlock(&t->lit_lock);
- if (!e0) {
- e0 = idmap_entry_alloc(ruid, luid, rgid, lgid);
- if (!e0)
- return -ENOMEM;
+ spin_lock(&t->lit_lock);
+ e0 = idmap_search_entry(t, ruid, luid, rgid, lgid);
+ spin_unlock(&t->lit_lock);
+ if (!e0) {
+ e0 = idmap_entry_alloc(ruid, luid, rgid, lgid);
+ if (!e0)
+ return -ENOMEM;
- cfs_spin_lock(&t->lit_lock);
+ spin_lock(&t->lit_lock);
e1 = idmap_search_entry(t, ruid, luid, rgid, lgid);
if (e1 == NULL) {
cfs_list_add_tail(&e0->lie_rmt_uid_hash,
&t->lit_idmaps[LCL_GIDMAP_IDX]
[lustre_idmap_hashfunc(lgid)]);
}
- cfs_spin_unlock(&t->lit_lock);
+ spin_unlock(&t->lit_lock);
if (e1 != NULL) {
idmap_entry_free(e0);
if (IS_ERR(e1))
uid_t ruid, uid_t luid,
gid_t rgid, gid_t lgid)
{
- struct lustre_idmap_entry *e;
- int rc = 0;
+ struct lustre_idmap_entry *e;
+ int rc = 0;
- LASSERT(t);
+ LASSERT(t);
- cfs_spin_lock(&t->lit_lock);
- e = idmap_search_entry(t, ruid, luid, rgid, lgid);
- if (IS_ERR(e))
- rc = PTR_ERR(e);
- else if (e)
- idmap_entry_free(e);
- cfs_spin_unlock(&t->lit_lock);
+ spin_lock(&t->lit_lock);
+ e = idmap_search_entry(t, ruid, luid, rgid, lgid);
+ if (IS_ERR(e))
+ rc = PTR_ERR(e);
+ else if (e)
+ idmap_entry_free(e);
+ spin_unlock(&t->lit_lock);
- return rc;
+ return rc;
}
EXPORT_SYMBOL(lustre_idmap_del);
hash = t->lit_idmaps[reverse ? LCL_UIDMAP_IDX : RMT_UIDMAP_IDX];
- cfs_spin_lock(&t->lit_lock);
- uid = idmap_lookup_uid(hash, reverse, uid);
- cfs_spin_unlock(&t->lit_lock);
+ spin_lock(&t->lit_lock);
+ uid = idmap_lookup_uid(hash, reverse, uid);
+ spin_unlock(&t->lit_lock);
- return uid;
+ return uid;
}
EXPORT_SYMBOL(lustre_idmap_lookup_uid);
hash = t->lit_idmaps[reverse ? LCL_GIDMAP_IDX : RMT_GIDMAP_IDX];
- cfs_spin_lock(&t->lit_lock);
- gid = idmap_lookup_gid(hash, reverse, gid);
- cfs_spin_unlock(&t->lit_lock);
+ spin_lock(&t->lit_lock);
+ gid = idmap_lookup_gid(hash, reverse, gid);
+ spin_unlock(&t->lit_lock);
- return gid;
+ return gid;
}
EXPORT_SYMBOL(lustre_idmap_lookup_gid);
if(unlikely(t == NULL))
return (ERR_PTR(-ENOMEM));
- cfs_spin_lock_init(&t->lit_lock);
- for (i = 0; i < ARRAY_SIZE(t->lit_idmaps); i++)
- for (j = 0; j < ARRAY_SIZE(t->lit_idmaps[i]); j++)
- CFS_INIT_LIST_HEAD(&t->lit_idmaps[i][j]);
+ spin_lock_init(&t->lit_lock);
+ for (i = 0; i < ARRAY_SIZE(t->lit_idmaps); i++)
+ for (j = 0; j < ARRAY_SIZE(t->lit_idmaps[i]); j++)
+ CFS_INIT_LIST_HEAD(&t->lit_idmaps[i][j]);
- return t;
+ return t;
}
EXPORT_SYMBOL(lustre_idmap_init);
LASSERT(t);
list = t->lit_idmaps[RMT_UIDMAP_IDX];
- cfs_spin_lock(&t->lit_lock);
- for (i = 0; i < CFS_IDMAP_HASHSIZE; i++)
- while (!cfs_list_empty(&list[i])) {
- e = cfs_list_entry(list[i].next,
- struct lustre_idmap_entry,
- lie_rmt_uid_hash);
- idmap_entry_free(e);
- }
- cfs_spin_unlock(&t->lit_lock);
-
- OBD_FREE_PTR(t);
+ spin_lock(&t->lit_lock);
+ for (i = 0; i < CFS_IDMAP_HASHSIZE; i++)
+ while (!cfs_list_empty(&list[i])) {
+ e = cfs_list_entry(list[i].next,
+ struct lustre_idmap_entry,
+ lie_rmt_uid_hash);
+ idmap_entry_free(e);
+ }
+ spin_unlock(&t->lit_lock);
+
+ OBD_FREE_PTR(t);
}
EXPORT_SYMBOL(lustre_idmap_fini);
if (libcfs_catastrophe)
rc += snprintf(page + rc, count - rc, "LBUG\n");
- cfs_read_lock(&obd_dev_lock);
+ read_lock(&obd_dev_lock);
for (i = 0; i < class_devno_max(); i++) {
struct obd_device *obd;
continue;
class_incref(obd, __FUNCTION__, cfs_current());
- cfs_read_unlock(&obd_dev_lock);
+ read_unlock(&obd_dev_lock);
if (obd_health_check(NULL, obd)) {
rc += snprintf(page + rc, count - rc,
obd->obd_name);
}
class_decref(obd, __FUNCTION__, cfs_current());
- cfs_read_lock(&obd_dev_lock);
+ read_lock(&obd_dev_lock);
}
- cfs_read_unlock(&obd_dev_lock);
+ read_unlock(&obd_dev_lock);
if (rc == 0)
return snprintf(page, count, "healthy\n");
i_size_write(dst, src->o_size);
/* optimum IO size */
if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits))
- dst->i_blkbits = cfs_ffs(src->o_blksize) - 1;
+ dst->i_blkbits = ffs(src->o_blksize) - 1;
if (dst->i_blkbits < CFS_PAGE_SHIFT)
dst->i_blkbits = CFS_PAGE_SHIFT;
}
if (valid & OBD_MD_FLBLKSZ)
- dst->i_blkbits = cfs_ffs(src->o_blksize)-1;
+ dst->i_blkbits = ffs(src->o_blksize)-1;
if (valid & OBD_MD_FLMODE)
dst->i_mode = (dst->i_mode & S_IFMT) | (src->o_mode & ~S_IFMT);
if (valid & OBD_MD_FLUID)
if (loghandle == NULL)
return ERR_PTR(-ENOMEM);
- cfs_init_rwsem(&loghandle->lgh_lock);
- cfs_spin_lock_init(&loghandle->lgh_hdr_lock);
+ init_rwsem(&loghandle->lgh_lock);
+ spin_lock_init(&loghandle->lgh_hdr_lock);
CFS_INIT_LIST_HEAD(&loghandle->u.phd.phd_entry);
return loghandle;
RETURN(-EINVAL);
}
- cfs_spin_lock(&loghandle->lgh_hdr_lock);
+ spin_lock(&loghandle->lgh_hdr_lock);
if (!ext2_clear_bit(index, llh->llh_bitmap)) {
- cfs_spin_unlock(&loghandle->lgh_hdr_lock);
- CDEBUG(D_RPCTRACE, "Catalog index %u already clear?\n", index);
- RETURN(-ENOENT);
- }
+ spin_unlock(&loghandle->lgh_hdr_lock);
+ CDEBUG(D_RPCTRACE, "Catalog index %u already clear?\n", index);
+ RETURN(-ENOENT);
+ }
- llh->llh_count--;
+ llh->llh_count--;
- if ((llh->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
- (llh->llh_count == 1) &&
- (loghandle->lgh_last_idx == (LLOG_BITMAP_BYTES * 8) - 1)) {
- cfs_spin_unlock(&loghandle->lgh_hdr_lock);
+ if ((llh->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
+ (llh->llh_count == 1) &&
+ (loghandle->lgh_last_idx == (LLOG_BITMAP_BYTES * 8) - 1)) {
+ spin_unlock(&loghandle->lgh_hdr_lock);
rc = llog_destroy(env, loghandle);
if (rc < 0) {
CERROR("%s: can't destroy empty llog #"LPX64"#"LPX64
}
RETURN(1);
}
- cfs_spin_unlock(&loghandle->lgh_hdr_lock);
+ spin_unlock(&loghandle->lgh_hdr_lock);
rc = llog_write(env, loghandle, &llh->llh_hdr, NULL, 0, NULL, 0);
if (rc < 0) {
}
RETURN(0);
out_err:
- cfs_spin_lock(&loghandle->lgh_hdr_lock);
+ spin_lock(&loghandle->lgh_hdr_lock);
ext2_set_bit(index, llh->llh_bitmap);
llh->llh_count++;
- cfs_spin_unlock(&loghandle->lgh_hdr_lock);
+ spin_unlock(&loghandle->lgh_hdr_lock);
return rc;
}
EXPORT_SYMBOL(llog_cancel_rec);
lu_env_fini(&env);
out:
- cfs_complete(&lpi->lpi_completion);
+ complete(&lpi->lpi_completion);
return rc;
}
#endif
/* The new thread can't use parent env,
* init the new one in llog_process_thread_daemonize. */
lpi->lpi_env = NULL;
- cfs_init_completion(&lpi->lpi_completion);
+ init_completion(&lpi->lpi_completion);
rc = cfs_create_thread(llog_process_thread_daemonize, lpi,
CFS_DAEMON_FLAGS);
if (rc < 0) {
OBD_FREE_PTR(lpi);
RETURN(rc);
}
- cfs_wait_for_completion(&lpi->lpi_completion);
+ wait_for_completion(&lpi->lpi_completion);
} else {
lpi->lpi_env = env;
llog_process_thread(lpi);
if (rc)
GOTO(out_trans, rc);
- cfs_down_write(&loghandle->lgh_lock);
+ down_write(&loghandle->lgh_lock);
rc = llog_write_rec(env, loghandle, rec, reccookie,
cookiecount, buf, idx, th);
- cfs_up_write(&loghandle->lgh_lock);
+ up_write(&loghandle->lgh_lock);
out_trans:
dt_trans_stop(env, dt, th);
} else { /* lvfs compatibility */
- cfs_down_write(&loghandle->lgh_lock);
+ down_write(&loghandle->lgh_lock);
rc = llog_write_rec(env, loghandle, rec, reccookie,
cookiecount, buf, idx, NULL);
- cfs_up_write(&loghandle->lgh_lock);
+ up_write(&loghandle->lgh_lock);
}
RETURN(rc);
}
if (index == 0)
index = 1;
- cfs_spin_lock(&loghandle->lgh_hdr_lock);
+ spin_lock(&loghandle->lgh_hdr_lock);
llh->llh_count++;
- if (ext2_set_bit(index, llh->llh_bitmap)) {
- CERROR("argh, index %u already set in log bitmap?\n",
- index);
- cfs_spin_unlock(&loghandle->lgh_hdr_lock);
- LBUG(); /* should never happen */
- }
- cfs_spin_unlock(&loghandle->lgh_hdr_lock);
+ if (ext2_set_bit(index, llh->llh_bitmap)) {
+ CERROR("argh, index %u already set in log bitmap?\n",
+ index);
+ spin_unlock(&loghandle->lgh_hdr_lock);
+ LBUG(); /* should never happen */
+ }
+ spin_unlock(&loghandle->lgh_hdr_lock);
cathandle->lgh_last_idx = index;
llh->llh_tail.lrt_index = index;
if (cathandle == NULL)
RETURN(-EBADF);
- cfs_down_write(&cathandle->lgh_lock);
+ down_write(&cathandle->lgh_lock);
cfs_list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
u.phd.phd_entry) {
struct llog_logid *cgl = &loghandle->lgh_id;
continue;
}
loghandle->u.phd.phd_cat_handle = cathandle;
- cfs_up_write(&cathandle->lgh_lock);
+ up_write(&cathandle->lgh_lock);
GOTO(out, rc = 0);
}
}
- cfs_up_write(&cathandle->lgh_lock);
+ up_write(&cathandle->lgh_lock);
rc = llog_open(env, cathandle->lgh_ctxt, &loghandle, logid, NULL,
LLOG_OPEN_EXISTS);
GOTO(out, rc);
}
- cfs_down_write(&cathandle->lgh_lock);
+ down_write(&cathandle->lgh_lock);
cfs_list_add(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head);
- cfs_up_write(&cathandle->lgh_lock);
+ up_write(&cathandle->lgh_lock);
loghandle->u.phd.phd_cat_handle = cathandle;
loghandle->u.phd.phd_cookie.lgc_lgl = cathandle->lgh_id;
struct llog_handle *loghandle = NULL;
ENTRY;
- cfs_down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
+ down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
loghandle = cathandle->u.chd.chd_current_log;
if (loghandle) {
struct llog_log_hdr *llh;
- cfs_down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
+ down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
llh = loghandle->lgh_hdr;
if (llh == NULL ||
loghandle->lgh_last_idx < LLOG_BITMAP_SIZE(llh) - 1) {
- cfs_up_read(&cathandle->lgh_lock);
+ up_read(&cathandle->lgh_lock);
RETURN(loghandle);
} else {
- cfs_up_write(&loghandle->lgh_lock);
+ up_write(&loghandle->lgh_lock);
}
}
- cfs_up_read(&cathandle->lgh_lock);
+ up_read(&cathandle->lgh_lock);
/* time to use next log */
/* first, we have to make sure the state hasn't changed */
- cfs_down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
+ down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
loghandle = cathandle->u.chd.chd_current_log;
if (loghandle) {
struct llog_log_hdr *llh;
- cfs_down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
+ down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
llh = loghandle->lgh_hdr;
LASSERT(llh);
if (loghandle->lgh_last_idx < LLOG_BITMAP_SIZE(llh) - 1) {
- cfs_up_write(&cathandle->lgh_lock);
+ up_write(&cathandle->lgh_lock);
RETURN(loghandle);
} else {
- cfs_up_write(&loghandle->lgh_lock);
+ up_write(&loghandle->lgh_lock);
}
}
loghandle = cathandle->u.chd.chd_next_log;
cathandle->u.chd.chd_current_log = loghandle;
cathandle->u.chd.chd_next_log = NULL;
- cfs_down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
- cfs_up_write(&cathandle->lgh_lock);
+ down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
+ up_write(&cathandle->lgh_lock);
LASSERT(loghandle);
RETURN(loghandle);
}
if (!llog_exist(loghandle)) {
rc = llog_cat_new_log(env, cathandle, loghandle, th);
if (rc < 0) {
- cfs_up_write(&loghandle->lgh_lock);
+ up_write(&loghandle->lgh_lock);
RETURN(rc);
}
}
rc = llog_write_rec(env, loghandle, rec, reccookie, 1, buf, -1, th);
if (rc < 0)
CERROR("llog_write_rec %d: lh=%p\n", rc, loghandle);
- cfs_up_write(&loghandle->lgh_lock);
+ up_write(&loghandle->lgh_lock);
if (rc == -ENOSPC) {
/* try to use next log */
loghandle = llog_cat_current_log(cathandle, th);
if (!llog_exist(loghandle)) {
rc = llog_cat_new_log(env, cathandle, loghandle, th);
if (rc < 0) {
- cfs_up_write(&loghandle->lgh_lock);
+ up_write(&loghandle->lgh_lock);
RETURN(rc);
}
}
-1, th);
if (rc < 0)
CERROR("llog_write_rec %d: lh=%p\n", rc, loghandle);
- cfs_up_write(&loghandle->lgh_lock);
+ up_write(&loghandle->lgh_lock);
}
RETURN(rc);
if (cathandle->u.chd.chd_current_log == NULL) {
/* declare new plain llog */
- cfs_down_write(&cathandle->lgh_lock);
+ down_write(&cathandle->lgh_lock);
if (cathandle->u.chd.chd_current_log == NULL) {
rc = llog_open(env, cathandle->lgh_ctxt, &loghandle,
NULL, NULL, LLOG_OPEN_NEW);
&cathandle->u.chd.chd_head);
}
}
- cfs_up_write(&cathandle->lgh_lock);
+ up_write(&cathandle->lgh_lock);
} else if (cathandle->u.chd.chd_next_log == NULL) {
/* declare next plain llog */
- cfs_down_write(&cathandle->lgh_lock);
+ down_write(&cathandle->lgh_lock);
if (cathandle->u.chd.chd_next_log == NULL) {
rc = llog_open(env, cathandle->lgh_ctxt, &loghandle,
NULL, NULL, LLOG_OPEN_NEW);
&cathandle->u.chd.chd_head);
}
}
- cfs_up_write(&cathandle->lgh_lock);
+ up_write(&cathandle->lgh_lock);
}
if (rc)
GOTO(out, rc);
lrc = llog_cancel_rec(env, loghandle, cookies->lgc_index);
if (lrc == 1) { /* log has been destroyed */
index = loghandle->u.phd.phd_cookie.lgc_index;
- cfs_down_write(&cathandle->lgh_lock);
+ down_write(&cathandle->lgh_lock);
if (cathandle->u.chd.chd_current_log == loghandle)
cathandle->u.chd.chd_current_log = NULL;
- cfs_up_write(&cathandle->lgh_lock);
+ up_write(&cathandle->lgh_lock);
llog_close(env, loghandle);
LASSERT(index);
void *lpi_catdata;
int lpi_rc;
int lpi_flags;
- cfs_completion_t lpi_completion;
+ struct completion lpi_completion;
const struct lu_env *lpi_env;
};
CDEBUG(D_IOCTL, "cannot destroy log\n");
GOTO(out, rc);
}
- cfs_down_write(&cat->lgh_lock);
+ down_write(&cat->lgh_lock);
if (cat->u.chd.chd_current_log == log)
cat->u.chd.chd_current_log = NULL;
- cfs_up_write(&cat->lgh_lock);
+ up_write(&cat->lgh_lock);
llog_cat_set_first_idx(cat, index);
rc = llog_cancel_rec(env, cat, index);
out:
if (!idarray)
RETURN(-ENOMEM);
- cfs_mutex_lock(&obd->obd_olg.olg_cat_processing);
+ mutex_lock(&obd->obd_olg.olg_cat_processing);
rc = llog_get_cat_list(obd, name, 0, count, idarray);
if (rc)
GOTO(out, rc);
}
out:
/* release mutex */
- cfs_mutex_unlock(&obd->obd_olg.olg_cat_processing);
+ mutex_unlock(&obd->obd_olg.olg_cat_processing);
OBD_FREE_LARGE(idarray, size);
RETURN(rc);
/* The caller should make sure only one process accesses the lgh_last_idx;
 * otherwise it might hit the assert. */
LASSERT(index < LLOG_BITMAP_SIZE(llh));
- cfs_spin_lock(&loghandle->lgh_hdr_lock);
+ spin_lock(&loghandle->lgh_hdr_lock);
if (ext2_set_bit(index, llh->llh_bitmap)) {
CERROR("argh, index %u already set in log bitmap?\n", index);
- cfs_spin_unlock(&loghandle->lgh_hdr_lock);
+ spin_unlock(&loghandle->lgh_hdr_lock);
LBUG(); /* should never happen */
}
llh->llh_count++;
- cfs_spin_unlock(&loghandle->lgh_hdr_lock);
+ spin_unlock(&loghandle->lgh_hdr_lock);
llh->llh_tail.lrt_index = index;
rc = llog_lvfs_write_blob(obd, file, &llh->llh_hdr, NULL, 0);
struct obd_device *obd;
int rc = 0;
- cfs_spin_lock(&olg->olg_lock);
- if (!cfs_atomic_dec_and_test(&ctxt->loc_refcount)) {
- cfs_spin_unlock(&olg->olg_lock);
- return rc;
- }
- olg->olg_ctxts[ctxt->loc_idx] = NULL;
- cfs_spin_unlock(&olg->olg_lock);
+ spin_lock(&olg->olg_lock);
+ if (!cfs_atomic_dec_and_test(&ctxt->loc_refcount)) {
+ spin_unlock(&olg->olg_lock);
+ return rc;
+ }
+ olg->olg_ctxts[ctxt->loc_idx] = NULL;
+ spin_unlock(&olg->olg_lock);
- if (ctxt->loc_lcm)
- lcm_put(ctxt->loc_lcm);
+ if (ctxt->loc_lcm)
+ lcm_put(ctxt->loc_lcm);
- obd = ctxt->loc_obd;
- cfs_spin_lock(&obd->obd_dev_lock);
- /* sync with llog ctxt user thread */
- cfs_spin_unlock(&obd->obd_dev_lock);
+ obd = ctxt->loc_obd;
+ spin_lock(&obd->obd_dev_lock);
+ /* sync with llog ctxt user thread */
+ spin_unlock(&obd->obd_dev_lock);
/* obd->obd_starting is needed to handle cleanup in the
 * error case while the obd is starting up. */
ctxt->loc_olg = olg;
ctxt->loc_idx = index;
ctxt->loc_logops = op;
- cfs_mutex_init(&ctxt->loc_mutex);
+ mutex_init(&ctxt->loc_mutex);
ctxt->loc_exp = class_export_get(disk_obd->obd_self_export);
ctxt->loc_flags = LLOG_CTXT_FLAG_UNINITIALIZED;
/* The caller should make sure only one process accesses the lgh_last_idx;
 * otherwise it might hit the assert. */
LASSERT(index < LLOG_BITMAP_SIZE(llh));
- cfs_spin_lock(&loghandle->lgh_hdr_lock);
+ spin_lock(&loghandle->lgh_hdr_lock);
if (ext2_set_bit(index, llh->llh_bitmap)) {
CERROR("%s: index %u already set in log bitmap\n",
o->do_lu.lo_dev->ld_obd->obd_name, index);
- cfs_spin_unlock(&loghandle->lgh_hdr_lock);
+ spin_unlock(&loghandle->lgh_hdr_lock);
LBUG(); /* should never happen */
}
llh->llh_count++;
- cfs_spin_unlock(&loghandle->lgh_hdr_lock);
+ spin_unlock(&loghandle->lgh_hdr_lock);
llh->llh_tail.lrt_index = index;
lgi->lgi_off = 0;
if (IS_ERR(ls))
RETURN(PTR_ERR(ls));
- cfs_mutex_lock(&ls->ls_los_mutex);
+ mutex_lock(&ls->ls_los_mutex);
los = dt_los_find(ls, FID_SEQ_LLOG);
- cfs_mutex_unlock(&ls->ls_los_mutex);
+ mutex_unlock(&ls->ls_los_mutex);
LASSERT(los);
ls_device_put(env, ls);
if (IS_ERR(ls))
RETURN(PTR_ERR(ls));
- cfs_mutex_lock(&ls->ls_los_mutex);
+ mutex_lock(&ls->ls_los_mutex);
los = dt_los_find(ls, FID_SEQ_LLOG);
- cfs_mutex_unlock(&ls->ls_los_mutex);
+ mutex_unlock(&ls->ls_los_mutex);
if (los != NULL) {
dt_los_put(los);
local_oid_storage_fini(env, los);
/* all initialized local storages on this node are linked on this list */
static CFS_LIST_HEAD(ls_list_head);
-static CFS_DEFINE_MUTEX(ls_list_mutex);
+static DEFINE_MUTEX(ls_list_mutex);
static int ls_object_init(const struct lu_env *env, struct lu_object *o,
const struct lu_object_conf *unused)
{
struct ls_device *ls;
- cfs_mutex_lock(&ls_list_mutex);
+ mutex_lock(&ls_list_mutex);
ls = __ls_find_dev(dev);
- cfs_mutex_unlock(&ls_list_mutex);
+ mutex_unlock(&ls_list_mutex);
return ls;
}
ENTRY;
- cfs_mutex_lock(&ls_list_mutex);
+ mutex_lock(&ls_list_mutex);
ls = __ls_find_dev(dev);
if (ls)
GOTO(out_ls, ls);
cfs_atomic_set(&ls->ls_refcount, 1);
CFS_INIT_LIST_HEAD(&ls->ls_los_list);
- cfs_mutex_init(&ls->ls_los_mutex);
+ mutex_init(&ls->ls_los_mutex);
ls->ls_osd = dev;
/* finally add ls to the list */
cfs_list_add(&ls->ls_linkage, &ls_list_head);
out_ls:
- cfs_mutex_unlock(&ls_list_mutex);
+ mutex_unlock(&ls_list_mutex);
RETURN(ls);
}
if (!cfs_atomic_dec_and_test(&ls->ls_refcount))
return;
- cfs_mutex_lock(&ls_list_mutex);
+ mutex_lock(&ls_list_mutex);
if (cfs_atomic_read(&ls->ls_refcount) == 0) {
LASSERT(cfs_list_empty(&ls->ls_los_list));
cfs_list_del(&ls->ls_linkage);
lu_device_fini(&ls->ls_top_dev.dd_lu_dev);
OBD_FREE_PTR(ls);
}
- cfs_mutex_unlock(&ls_list_mutex);
+ mutex_unlock(&ls_list_mutex);
}
/**
* the latest generated fid atomically with
* object creation see local_object_create() */
- cfs_mutex_lock(&los->los_id_lock);
+ mutex_lock(&los->los_id_lock);
fid->f_seq = los->los_seq;
fid->f_oid = los->los_last_oid++;
fid->f_ver = 0;
- cfs_mutex_unlock(&los->los_id_lock);
+ mutex_unlock(&los->los_id_lock);
return 0;
}
/* many threads can update this; serialize
 * them here to avoid the race where one thread
 * takes the value first but writes it last */
- cfs_mutex_lock(&los->los_id_lock);
+ mutex_lock(&los->los_id_lock);
/* update local oid number on disk so that
* we know the last one used after reboot */
dti->dti_lb.lb_len = sizeof(losd);
rc = dt_record_write(env, los->los_obj, &dti->dti_lb, &dti->dti_off,
th);
- cfs_mutex_unlock(&los->los_id_lock);
+ mutex_unlock(&los->los_id_lock);
RETURN(rc);
}
if (IS_ERR(ls))
RETURN(PTR_ERR(ls));
- cfs_mutex_lock(&ls->ls_los_mutex);
+ mutex_lock(&ls->ls_los_mutex);
*los = dt_los_find(ls, fid_seq(first_fid));
if (*los != NULL)
GOTO(out, rc = 0);
GOTO(out, rc = -ENOMEM);
cfs_atomic_set(&(*los)->los_refcount, 1);
- cfs_mutex_init(&(*los)->los_id_lock);
+ mutex_init(&(*los)->los_id_lock);
(*los)->los_dev = &ls->ls_top_dev;
cfs_atomic_inc(&ls->ls_refcount);
cfs_list_add(&(*los)->los_list, &ls->ls_los_list);
(*los)->los_obj = o;
}
out:
- cfs_mutex_unlock(&ls->ls_los_mutex);
+ mutex_unlock(&ls->ls_los_mutex);
ls_device_put(env, ls);
return rc;
}
LASSERT(los->los_dev);
ls = dt2ls_dev(los->los_dev);
- cfs_mutex_lock(&ls->ls_los_mutex);
+ mutex_lock(&ls->ls_los_mutex);
if (cfs_atomic_read(&los->los_refcount) == 0) {
if (los->los_obj)
lu_object_put_nocache(env, &los->los_obj->do_lu);
cfs_list_del(&los->los_list);
OBD_FREE_PTR(los);
}
- cfs_mutex_unlock(&ls->ls_los_mutex);
+ mutex_unlock(&ls->ls_los_mutex);
ls_device_put(env, ls);
}
EXPORT_SYMBOL(local_oid_storage_fini);
struct dt_device *ls_osd;
/* list of all local OID storages */
cfs_list_t ls_los_list;
- cfs_mutex_t ls_los_mutex;
+ struct mutex ls_los_mutex;
};
static inline struct ls_device *dt2ls_dev(struct dt_device *d)
LASSERT(atomic_read(&job->js_refcount) == 0);
LASSERT(job->js_jobstats);
- cfs_write_lock(&job->js_jobstats->ojs_lock);
+ write_lock(&job->js_jobstats->ojs_lock);
cfs_list_del_init(&job->js_list);
- cfs_write_unlock(&job->js_jobstats->ojs_lock);
+ write_unlock(&job->js_jobstats->ojs_lock);
lprocfs_free_stats(&job->js_stats);
OBD_FREE_PTR(job);
* "job2" was initialized in job_alloc() already. LU-2163 */
} else {
LASSERT(cfs_list_empty(&job->js_list));
- cfs_write_lock(&stats->ojs_lock);
+ write_lock(&stats->ojs_lock);
cfs_list_add_tail(&job->js_list, &stats->ojs_list);
- cfs_write_unlock(&stats->ojs_lock);
+ write_unlock(&stats->ojs_lock);
}
found:
loff_t off = *pos;
struct job_stat *job;
- cfs_read_lock(&stats->ojs_lock);
+ read_lock(&stats->ojs_lock);
if (off == 0)
return SEQ_START_TOKEN;
off--;
{
struct obd_job_stats *stats = p->private;
- cfs_read_unlock(&stats->ojs_lock);
+ read_unlock(&stats->ojs_lock);
}
static void *lprocfs_jobstats_seq_next(struct seq_file *p, void *v, loff_t *pos)
RETURN(-ENOMEM);
CFS_INIT_LIST_HEAD(&stats->ojs_list);
- cfs_rwlock_init(&stats->ojs_lock);
+ rwlock_init(&stats->ojs_lock);
stats->ojs_cntr_num = cntr_num;
stats->ojs_cntr_init_fn = init_fn;
stats->ojs_cleanup_interval = 600; /* 10 mins by default */
#define MAX_STRING_SIZE 128
/* for bug 10866, global variable */
-CFS_DECLARE_RWSEM(_lprocfs_lock);
+DECLARE_RWSEM(_lprocfs_lock);
EXPORT_SYMBOL(_lprocfs_lock);
int lprocfs_single_release(struct inode *inode, struct file *file)
"]\n"
" connection:\n"
" failover_nids: [");
- cfs_spin_lock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
j = 0;
cfs_list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
i += snprintf(page + i, count - i, "%s%s", j ? ", " : "",
imp->imp_conn_cnt,
imp->imp_generation,
cfs_atomic_read(&imp->imp_inval_count));
- cfs_spin_unlock(&imp->imp_lock);
+ spin_unlock(&imp->imp_lock);
lprocfs_stats_collect(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR, &ret);
if (ret.lc_count != 0) {
stats->ls_num = num;
stats->ls_biggest_alloc_num = 1;
stats->ls_flags = flags;
- cfs_spin_lock_init(&stats->ls_lock);
+ spin_lock_init(&stats->ls_lock);
percpusize = offsetof(struct lprocfs_percpu, lp_cntr[num]);
if (num_entry > 1)
CDEBUG(D_INFO,"refcnt %d\n", cfs_atomic_read(&stat->nid_exp_ref_count));
if (cfs_atomic_read(&stat->nid_exp_ref_count) == 1) {
/* object has only hash references. */
- cfs_spin_lock(&stat->nid_obd->obd_nid_lock);
- cfs_list_move(&stat->nid_list, data);
- cfs_spin_unlock(&stat->nid_obd->obd_nid_lock);
+ spin_lock(&stat->nid_obd->obd_nid_lock);
+ cfs_list_move(&stat->nid_list, data);
+ spin_unlock(&stat->nid_obd->obd_nid_lock);
RETURN(1);
}
/* we have a reference to the object - only clear data */
exp->exp_nid_stats = new_stat;
*newnid = 1;
/* protect against concurrent additions to the list; no locking needed on destroy */
- cfs_spin_lock(&obd->obd_nid_lock);
- cfs_list_add(&new_stat->nid_list, &obd->obd_nid_stats);
- cfs_spin_unlock(&obd->obd_nid_lock);
+ spin_lock(&obd->obd_nid_lock);
+ cfs_list_add(&new_stat->nid_list, &obd->obd_nid_stats);
+ spin_unlock(&obd->obd_nid_lock);
RETURN(rc);
void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value)
{
- if (value >= OBD_HIST_MAX)
- value = OBD_HIST_MAX - 1;
+ if (value >= OBD_HIST_MAX)
+ value = OBD_HIST_MAX - 1;
- cfs_spin_lock(&oh->oh_lock);
- oh->oh_buckets[value]++;
- cfs_spin_unlock(&oh->oh_lock);
+ spin_lock(&oh->oh_lock);
+ oh->oh_buckets[value]++;
+ spin_unlock(&oh->oh_lock);
}
EXPORT_SYMBOL(lprocfs_oh_tally);
void lprocfs_oh_clear(struct obd_histogram *oh)
{
- cfs_spin_lock(&oh->oh_lock);
- memset(oh->oh_buckets, 0, sizeof(oh->oh_buckets));
- cfs_spin_unlock(&oh->oh_lock);
+ spin_lock(&oh->oh_lock);
+ memset(oh->oh_buckets, 0, sizeof(oh->oh_buckets));
+ spin_unlock(&oh->oh_lock);
}
EXPORT_SYMBOL(lprocfs_oh_clear);
*/
void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o)
{
- cfs_set_bit(LU_OBJECT_HEARD_BANSHEE,
+ set_bit(LU_OBJECT_HEARD_BANSHEE,
&o->lo_header->loh_flags);
return lu_object_put(env, o);
}
* Global list of all sites on this node
*/
static CFS_LIST_HEAD(lu_sites);
-static CFS_DEFINE_MUTEX(lu_sites_guard);
+static DEFINE_MUTEX(lu_sites_guard);
/**
* Global environment used by site shrinker.
void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
{
- cfs_spin_lock(&s->ls_ld_lock);
+ spin_lock(&s->ls_ld_lock);
if (cfs_list_empty(&d->ld_linkage))
cfs_list_add(&d->ld_linkage, &s->ls_ld_linkage);
- cfs_spin_unlock(&s->ls_ld_lock);
+ spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_add_linkage);
void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d)
{
- cfs_spin_lock(&s->ls_ld_lock);
+ spin_lock(&s->ls_ld_lock);
cfs_list_del_init(&d->ld_linkage);
- cfs_spin_unlock(&s->ls_ld_lock);
+ spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_del_linkage);
lu_ref_add(&top->ld_reference, "site-top", s);
CFS_INIT_LIST_HEAD(&s->ls_ld_linkage);
- cfs_spin_lock_init(&s->ls_ld_lock);
+ spin_lock_init(&s->ls_ld_lock);
lu_dev_add_linkage(s, top);
- RETURN(0);
+ RETURN(0);
}
EXPORT_SYMBOL(lu_site_init);
*/
void lu_site_fini(struct lu_site *s)
{
- cfs_mutex_lock(&lu_sites_guard);
+ mutex_lock(&lu_sites_guard);
cfs_list_del_init(&s->ls_linkage);
- cfs_mutex_unlock(&lu_sites_guard);
+ mutex_unlock(&lu_sites_guard);
if (s->ls_obj_hash != NULL) {
cfs_hash_putref(s->ls_obj_hash);
int lu_site_init_finish(struct lu_site *s)
{
int result;
- cfs_mutex_lock(&lu_sites_guard);
+ mutex_lock(&lu_sites_guard);
result = lu_context_refill(&lu_shrink_env.le_ctx);
if (result == 0)
cfs_list_add(&s->ls_linkage, &lu_sites);
- cfs_mutex_unlock(&lu_sites_guard);
+ mutex_unlock(&lu_sites_guard);
return result;
}
EXPORT_SYMBOL(lu_site_init_finish);
LASSERT(key->lct_owner != NULL);
result = -ENFILE;
- cfs_spin_lock(&lu_keys_guard);
+ spin_lock(&lu_keys_guard);
for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
if (lu_keys[i] == NULL) {
key->lct_index = i;
break;
}
}
- cfs_spin_unlock(&lu_keys_guard);
- return result;
+ spin_unlock(&lu_keys_guard);
+ return result;
}
EXPORT_SYMBOL(lu_context_key_register);
*/
void lu_context_key_degister(struct lu_context_key *key)
{
- LASSERT(cfs_atomic_read(&key->lct_used) >= 1);
- LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
+ LASSERT(cfs_atomic_read(&key->lct_used) >= 1);
+ LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
- lu_context_key_quiesce(key);
+ lu_context_key_quiesce(key);
- ++key_set_version;
- cfs_spin_lock(&lu_keys_guard);
- key_fini(&lu_shrink_env.le_ctx, key->lct_index);
- if (lu_keys[key->lct_index]) {
- lu_keys[key->lct_index] = NULL;
- lu_ref_fini(&key->lct_reference);
- }
- cfs_spin_unlock(&lu_keys_guard);
+ ++key_set_version;
+ spin_lock(&lu_keys_guard);
+ key_fini(&lu_shrink_env.le_ctx, key->lct_index);
+ if (lu_keys[key->lct_index]) {
+ lu_keys[key->lct_index] = NULL;
+ lu_ref_fini(&key->lct_reference);
+ }
+ spin_unlock(&lu_keys_guard);
- LASSERTF(cfs_atomic_read(&key->lct_used) == 1,
- "key has instances: %d\n",
- cfs_atomic_read(&key->lct_used));
+ LASSERTF(cfs_atomic_read(&key->lct_used) == 1,
+ "key has instances: %d\n",
+ cfs_atomic_read(&key->lct_used));
}
EXPORT_SYMBOL(lu_context_key_degister);
/*
* XXX memory barrier has to go here.
*/
- cfs_spin_lock(&lu_keys_guard);
- cfs_list_for_each_entry(ctx, &lu_context_remembered,
- lc_remember)
- key_fini(ctx, key->lct_index);
- cfs_spin_unlock(&lu_keys_guard);
- ++key_set_version;
- }
+ spin_lock(&lu_keys_guard);
+ cfs_list_for_each_entry(ctx, &lu_context_remembered,
+ lc_remember)
+ key_fini(ctx, key->lct_index);
+ spin_unlock(&lu_keys_guard);
+ ++key_set_version;
+ }
}
EXPORT_SYMBOL(lu_context_key_quiesce);
{
int rc;
- memset(ctx, 0, sizeof *ctx);
- ctx->lc_state = LCS_INITIALIZED;
- ctx->lc_tags = tags;
- if (tags & LCT_REMEMBER) {
- cfs_spin_lock(&lu_keys_guard);
- cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
- cfs_spin_unlock(&lu_keys_guard);
+ memset(ctx, 0, sizeof *ctx);
+ ctx->lc_state = LCS_INITIALIZED;
+ ctx->lc_tags = tags;
+ if (tags & LCT_REMEMBER) {
+ spin_lock(&lu_keys_guard);
+ cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
+ spin_unlock(&lu_keys_guard);
} else {
CFS_INIT_LIST_HEAD(&ctx->lc_remember);
}
keys_fini(ctx);
} else { /* could race with key degister */
- cfs_spin_lock(&lu_keys_guard);
+ spin_lock(&lu_keys_guard);
keys_fini(ctx);
cfs_list_del_init(&ctx->lc_remember);
- cfs_spin_unlock(&lu_keys_guard);
+ spin_unlock(&lu_keys_guard);
}
}
EXPORT_SYMBOL(lu_context_fini);
void lu_context_tags_update(__u32 tags)
{
- cfs_spin_lock(&lu_keys_guard);
- lu_context_tags_default |= tags;
- key_set_version ++;
- cfs_spin_unlock(&lu_keys_guard);
+ spin_lock(&lu_keys_guard);
+ lu_context_tags_default |= tags;
+ key_set_version++;
+ spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_tags_update);
void lu_context_tags_clear(__u32 tags)
{
- cfs_spin_lock(&lu_keys_guard);
- lu_context_tags_default &= ~tags;
- key_set_version ++;
- cfs_spin_unlock(&lu_keys_guard);
+ spin_lock(&lu_keys_guard);
+ lu_context_tags_default &= ~tags;
+ key_set_version++;
+ spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_tags_clear);
void lu_session_tags_update(__u32 tags)
{
- cfs_spin_lock(&lu_keys_guard);
- lu_session_tags_default |= tags;
- key_set_version ++;
- cfs_spin_unlock(&lu_keys_guard);
+ spin_lock(&lu_keys_guard);
+ lu_session_tags_default |= tags;
+ key_set_version++;
+ spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_session_tags_update);
void lu_session_tags_clear(__u32 tags)
{
- cfs_spin_lock(&lu_keys_guard);
- lu_session_tags_default &= ~tags;
- key_set_version ++;
- cfs_spin_unlock(&lu_keys_guard);
+ spin_lock(&lu_keys_guard);
+ lu_session_tags_default &= ~tags;
+ key_set_version++;
+ spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_session_tags_clear);
CDEBUG(D_INODE, "Shrink %d objects\n", remain);
- cfs_mutex_lock(&lu_sites_guard);
+ mutex_lock(&lu_sites_guard);
cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
if (shrink_param(sc, nr_to_scan) != 0) {
remain = lu_site_purge(&lu_shrink_env, s, remain);
break;
}
cfs_list_splice(&splice, lu_sites.prev);
- cfs_mutex_unlock(&lu_sites_guard);
+ mutex_unlock(&lu_sites_guard);
cached = (cached / 100) * sysctl_vfs_cache_pressure;
if (shrink_param(sc, nr_to_scan) == 0)
* conservatively. This should not be too bad, because this
* environment is global.
*/
- cfs_mutex_lock(&lu_sites_guard);
+ mutex_lock(&lu_sites_guard);
result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
- cfs_mutex_unlock(&lu_sites_guard);
+ mutex_unlock(&lu_sites_guard);
if (result != 0)
return result;
* Tear shrinker environment down _after_ de-registering
* lu_global_key, because the latter has a value in the former.
*/
- cfs_mutex_lock(&lu_sites_guard);
+ mutex_lock(&lu_sites_guard);
lu_env_fini(&lu_shrink_env);
- cfs_mutex_unlock(&lu_sites_guard);
+ mutex_unlock(&lu_sites_guard);
lu_ref_global_fini();
}
* Asserts a condition for a given lu_ref. Must be called with
* lu_ref::lf_guard held.
*/
-#define REFASSERT(ref, expr) do { \
- struct lu_ref *__tmp = (ref); \
- \
- if (unlikely(!(expr))) { \
- lu_ref_print(__tmp); \
- cfs_spin_unlock(&__tmp->lf_guard); \
- lu_ref_print_all(); \
- LASSERT(0); \
- cfs_spin_lock(&__tmp->lf_guard); \
- } \
+#define REFASSERT(ref, expr) do { \
+ struct lu_ref *__tmp = (ref); \
+ \
+ if (unlikely(!(expr))) { \
+ lu_ref_print(__tmp); \
+ spin_unlock(&__tmp->lf_guard); \
+ lu_ref_print_all(); \
+ LASSERT(0); \
+ spin_lock(&__tmp->lf_guard); \
+ } \
} while (0)
struct lu_ref_link {
* Protected by lu_ref_refs_guard.
*/
static CFS_LIST_HEAD(lu_ref_refs);
-static cfs_spinlock_t lu_ref_refs_guard;
+static spinlock_t lu_ref_refs_guard;
static struct lu_ref lu_ref_marker = {
.lf_guard = DEFINE_SPINLOCK(lu_ref_marker.lf_guard),
.lf_list = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_list),
void lu_ref_print_all(void)
{
- struct lu_ref *ref;
-
- cfs_spin_lock(&lu_ref_refs_guard);
- cfs_list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
- if (lu_ref_is_marker(ref))
- continue;
-
- cfs_spin_lock(&ref->lf_guard);
- lu_ref_print(ref);
- cfs_spin_unlock(&ref->lf_guard);
- }
- cfs_spin_unlock(&lu_ref_refs_guard);
+ struct lu_ref *ref;
+
+ spin_lock(&lu_ref_refs_guard);
+ cfs_list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
+ if (lu_ref_is_marker(ref))
+ continue;
+
+ spin_lock(&ref->lf_guard);
+ lu_ref_print(ref);
+ spin_unlock(&ref->lf_guard);
+ }
+ spin_unlock(&lu_ref_refs_guard);
}
EXPORT_SYMBOL(lu_ref_print_all);
void lu_ref_init_loc(struct lu_ref *ref, const char *func, const int line)
{
- ref->lf_refs = 0;
- ref->lf_func = func;
- ref->lf_line = line;
- cfs_spin_lock_init(&ref->lf_guard);
- CFS_INIT_LIST_HEAD(&ref->lf_list);
- cfs_spin_lock(&lu_ref_refs_guard);
- cfs_list_add(&ref->lf_linkage, &lu_ref_refs);
- cfs_spin_unlock(&lu_ref_refs_guard);
+ ref->lf_refs = 0;
+ ref->lf_func = func;
+ ref->lf_line = line;
+ spin_lock_init(&ref->lf_guard);
+ CFS_INIT_LIST_HEAD(&ref->lf_list);
+ spin_lock(&lu_ref_refs_guard);
+ cfs_list_add(&ref->lf_linkage, &lu_ref_refs);
+ spin_unlock(&lu_ref_refs_guard);
}
EXPORT_SYMBOL(lu_ref_init_loc);
void lu_ref_fini(struct lu_ref *ref)
{
- REFASSERT(ref, cfs_list_empty(&ref->lf_list));
- REFASSERT(ref, ref->lf_refs == 0);
- cfs_spin_lock(&lu_ref_refs_guard);
- cfs_list_del_init(&ref->lf_linkage);
- cfs_spin_unlock(&lu_ref_refs_guard);
+ REFASSERT(ref, cfs_list_empty(&ref->lf_list));
+ REFASSERT(ref, ref->lf_refs == 0);
+ spin_lock(&lu_ref_refs_guard);
+ cfs_list_del_init(&ref->lf_linkage);
+ spin_unlock(&lu_ref_refs_guard);
}
EXPORT_SYMBOL(lu_ref_fini);
link->ll_ref = ref;
link->ll_scope = scope;
link->ll_source = source;
- cfs_spin_lock(&ref->lf_guard);
- cfs_list_add_tail(&link->ll_linkage, &ref->lf_list);
- ref->lf_refs++;
- cfs_spin_unlock(&ref->lf_guard);
- }
- }
-
- if (link == NULL) {
- cfs_spin_lock(&ref->lf_guard);
- ref->lf_failed++;
- cfs_spin_unlock(&ref->lf_guard);
- link = ERR_PTR(-ENOMEM);
- }
-
- return link;
+ spin_lock(&ref->lf_guard);
+ cfs_list_add_tail(&link->ll_linkage, &ref->lf_list);
+ ref->lf_refs++;
+ spin_unlock(&ref->lf_guard);
+ }
+ }
+
+ if (link == NULL) {
+ spin_lock(&ref->lf_guard);
+ ref->lf_failed++;
+ spin_unlock(&ref->lf_guard);
+ link = ERR_PTR(-ENOMEM);
+ }
+
+ return link;
}
struct lu_ref_link *lu_ref_add(struct lu_ref *ref, const char *scope,
void lu_ref_del(struct lu_ref *ref, const char *scope, const void *source)
{
- struct lu_ref_link *link;
-
- cfs_spin_lock(&ref->lf_guard);
- link = lu_ref_find(ref, scope, source);
- if (link != NULL) {
- cfs_list_del(&link->ll_linkage);
- ref->lf_refs--;
- cfs_spin_unlock(&ref->lf_guard);
- OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
- } else {
- REFASSERT(ref, ref->lf_failed > 0);
- ref->lf_failed--;
- cfs_spin_unlock(&ref->lf_guard);
- }
+ struct lu_ref_link *link;
+
+ spin_lock(&ref->lf_guard);
+ link = lu_ref_find(ref, scope, source);
+ if (link != NULL) {
+ cfs_list_del(&link->ll_linkage);
+ ref->lf_refs--;
+ spin_unlock(&ref->lf_guard);
+ OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
+ } else {
+ REFASSERT(ref, ref->lf_failed > 0);
+ ref->lf_failed--;
+ spin_unlock(&ref->lf_guard);
+ }
}
EXPORT_SYMBOL(lu_ref_del);
void lu_ref_set_at(struct lu_ref *ref, struct lu_ref_link *link,
- const char *scope,
- const void *source0, const void *source1)
+ const char *scope,
+ const void *source0, const void *source1)
{
- cfs_spin_lock(&ref->lf_guard);
- if (link != ERR_PTR(-ENOMEM)) {
- REFASSERT(ref, link->ll_ref == ref);
- REFASSERT(ref, lu_ref_link_eq(link, scope, source0));
- link->ll_source = source1;
- } else {
- REFASSERT(ref, ref->lf_failed > 0);
- }
- cfs_spin_unlock(&ref->lf_guard);
+ spin_lock(&ref->lf_guard);
+ if (link != ERR_PTR(-ENOMEM)) {
+ REFASSERT(ref, link->ll_ref == ref);
+ REFASSERT(ref, lu_ref_link_eq(link, scope, source0));
+ link->ll_source = source1;
+ } else {
+ REFASSERT(ref, ref->lf_failed > 0);
+ }
+ spin_unlock(&ref->lf_guard);
}
EXPORT_SYMBOL(lu_ref_set_at);
void lu_ref_del_at(struct lu_ref *ref, struct lu_ref_link *link,
- const char *scope, const void *source)
+ const char *scope, const void *source)
{
- if (link != ERR_PTR(-ENOMEM)) {
- cfs_spin_lock(&ref->lf_guard);
- REFASSERT(ref, link->ll_ref == ref);
- REFASSERT(ref, lu_ref_link_eq(link, scope, source));
- cfs_list_del(&link->ll_linkage);
- ref->lf_refs--;
- cfs_spin_unlock(&ref->lf_guard);
- OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
- } else {
- cfs_spin_lock(&ref->lf_guard);
- REFASSERT(ref, ref->lf_failed > 0);
- ref->lf_failed--;
- cfs_spin_unlock(&ref->lf_guard);
- }
+ if (link != ERR_PTR(-ENOMEM)) {
+ spin_lock(&ref->lf_guard);
+ REFASSERT(ref, link->ll_ref == ref);
+ REFASSERT(ref, lu_ref_link_eq(link, scope, source));
+ cfs_list_del(&link->ll_linkage);
+ ref->lf_refs--;
+ spin_unlock(&ref->lf_guard);
+ OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
+ } else {
+ spin_lock(&ref->lf_guard);
+ REFASSERT(ref, ref->lf_failed > 0);
+ ref->lf_failed--;
+ spin_unlock(&ref->lf_guard);
+ }
}
EXPORT_SYMBOL(lu_ref_del_at);
static void *lu_ref_seq_start(struct seq_file *seq, loff_t *pos)
{
- struct lu_ref *ref = seq->private;
+ struct lu_ref *ref = seq->private;
- cfs_spin_lock(&lu_ref_refs_guard);
- if (cfs_list_empty(&ref->lf_linkage))
- ref = NULL;
- cfs_spin_unlock(&lu_ref_refs_guard);
+ spin_lock(&lu_ref_refs_guard);
+ if (cfs_list_empty(&ref->lf_linkage))
+ ref = NULL;
+ spin_unlock(&lu_ref_refs_guard);
- return ref;
+ return ref;
}
static void *lu_ref_seq_next(struct seq_file *seq, void *p, loff_t *pos)
LASSERT(seq->private == p);
LASSERT(!cfs_list_empty(&ref->lf_linkage));
- cfs_spin_lock(&lu_ref_refs_guard);
- next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
- if (&next->lf_linkage == &lu_ref_refs) {
- p = NULL;
- } else {
- (*pos)++;
- cfs_list_move(&ref->lf_linkage, &next->lf_linkage);
- }
- cfs_spin_unlock(&lu_ref_refs_guard);
- return p;
+ spin_lock(&lu_ref_refs_guard);
+ next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
+ if (&next->lf_linkage == &lu_ref_refs) {
+ p = NULL;
+ } else {
+ (*pos)++;
+ cfs_list_move(&ref->lf_linkage, &next->lf_linkage);
+ }
+ spin_unlock(&lu_ref_refs_guard);
+ return p;
}
static void lu_ref_seq_stop(struct seq_file *seq, void *p)
static int lu_ref_seq_show(struct seq_file *seq, void *p)
{
- struct lu_ref *ref = p;
- struct lu_ref *next;
-
- cfs_spin_lock(&lu_ref_refs_guard);
- next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
- if ((&next->lf_linkage == &lu_ref_refs) || lu_ref_is_marker(next)) {
- cfs_spin_unlock(&lu_ref_refs_guard);
- return 0;
- }
-
- /* print the entry */
-
- cfs_spin_lock(&next->lf_guard);
+ struct lu_ref *ref = p;
+ struct lu_ref *next;
+
+ spin_lock(&lu_ref_refs_guard);
+ next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
+ if ((&next->lf_linkage == &lu_ref_refs) || lu_ref_is_marker(next)) {
+ spin_unlock(&lu_ref_refs_guard);
+ return 0;
+ }
+
+ /* print the entry */
+ spin_lock(&next->lf_guard);
seq_printf(seq, "lu_ref: %p %d %d %s:%d\n",
next, next->lf_refs, next->lf_failed,
next->lf_func, next->lf_line);
seq_printf(seq, " #%d link: %s %p\n",
i++, link->ll_scope, link->ll_source);
}
- cfs_spin_unlock(&next->lf_guard);
- cfs_spin_unlock(&lu_ref_refs_guard);
+ spin_unlock(&next->lf_guard);
+ spin_unlock(&lu_ref_refs_guard);
- return 0;
+ return 0;
}
static struct seq_operations lu_ref_seq_ops = {
static int lu_ref_seq_open(struct inode *inode, struct file *file)
{
- struct lu_ref *marker = &lu_ref_marker;
- int result = 0;
-
- result = seq_open(file, &lu_ref_seq_ops);
- if (result == 0) {
- cfs_spin_lock(&lu_ref_refs_guard);
- if (!cfs_list_empty(&marker->lf_linkage))
- result = -EAGAIN;
- else
- cfs_list_add(&marker->lf_linkage, &lu_ref_refs);
- cfs_spin_unlock(&lu_ref_refs_guard);
+ struct lu_ref *marker = &lu_ref_marker;
+ int result = 0;
+
+ result = seq_open(file, &lu_ref_seq_ops);
+ if (result == 0) {
+ spin_lock(&lu_ref_refs_guard);
+ if (!cfs_list_empty(&marker->lf_linkage))
+ result = -EAGAIN;
+ else
+ cfs_list_add(&marker->lf_linkage, &lu_ref_refs);
+ spin_unlock(&lu_ref_refs_guard);
if (result == 0) {
struct seq_file *f = file->private_data;
static int lu_ref_seq_release(struct inode *inode, struct file *file)
{
- struct lu_ref *ref = ((struct seq_file *)file->private_data)->private;
+ struct lu_ref *ref = ((struct seq_file *)file->private_data)->private;
- cfs_spin_lock(&lu_ref_refs_guard);
- cfs_list_del_init(&ref->lf_linkage);
- cfs_spin_unlock(&lu_ref_refs_guard);
+ spin_lock(&lu_ref_refs_guard);
+ cfs_list_del_init(&ref->lf_linkage);
+ spin_unlock(&lu_ref_refs_guard);
- return seq_release(inode, file);
+ return seq_release(inode, file);
}
static struct file_operations lu_ref_dump_fops = {
int lu_ref_global_init(void)
{
- int result;
-
- CDEBUG(D_CONSOLE,
- "lu_ref tracking is enabled. Performance isn't.\n");
+ int result;
+ CDEBUG(D_CONSOLE,
+ "lu_ref tracking is enabled. Performance isn't.\n");
- cfs_spin_lock_init(&lu_ref_refs_guard);
+ spin_lock_init(&lu_ref_refs_guard);
result = lu_kmem_init(lu_ref_caches);
#if defined(__KERNEL__) && defined(LPROCFS)
# define list_for_each_rcu cfs_list_for_each
# define list_for_each_safe_rcu cfs_list_for_each_safe
# define list_for_each_entry_rcu cfs_list_for_each_entry
-# define rcu_read_lock() cfs_spin_lock(&bucket->lock)
-# define rcu_read_unlock() cfs_spin_unlock(&bucket->lock)
+# define rcu_read_lock() spin_lock(&bucket->lock)
+# define rcu_read_unlock() spin_unlock(&bucket->lock)
#endif /* !__KERNEL__ */
static __u64 handle_base;
#define HANDLE_INCR 7
-static cfs_spinlock_t handle_base_lock;
+static spinlock_t handle_base_lock;
static struct handle_bucket {
- cfs_spinlock_t lock;
- cfs_list_t head;
+ spinlock_t lock;
+ cfs_list_t head;
} *handle_hash;
#ifdef __arch_um__
 * This is a fast but simplistic cookie generation algorithm; it will
 * need a re-do at some point in the future for security.
*/
- cfs_spin_lock(&handle_base_lock);
- handle_base += HANDLE_INCR;
-
- h->h_cookie = handle_base;
- if (unlikely(handle_base == 0)) {
- /*
- * Cookie of zero is "dangerous", because in many places it's
- * assumed that 0 means "unassigned" handle, not bound to any
- * object.
- */
- CWARN("The universe has been exhausted: cookie wrap-around.\n");
- handle_base += HANDLE_INCR;
- }
- cfs_spin_unlock(&handle_base_lock);
+ spin_lock(&handle_base_lock);
+ handle_base += HANDLE_INCR;
+
+ h->h_cookie = handle_base;
+ if (unlikely(handle_base == 0)) {
+ /*
+ * Cookie of zero is "dangerous", because in many places it's
+ * assumed that 0 means "unassigned" handle, not bound to any
+ * object.
+ */
+ CWARN("The universe has been exhausted: cookie wrap-around.\n");
+ handle_base += HANDLE_INCR;
+ }
+ spin_unlock(&handle_base_lock);
h->h_ops = ops;
- cfs_spin_lock_init(&h->h_lock);
+ spin_lock_init(&h->h_lock);
- bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
- cfs_spin_lock(&bucket->lock);
- list_add_rcu(&h->h_link, &bucket->head);
- h->h_in = 1;
- cfs_spin_unlock(&bucket->lock);
+ bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
+ spin_lock(&bucket->lock);
+ list_add_rcu(&h->h_link, &bucket->head);
+ h->h_in = 1;
+ spin_unlock(&bucket->lock);
- CDEBUG(D_INFO, "added object %p with handle "LPX64" to hash\n",
- h, h->h_cookie);
- EXIT;
+ CDEBUG(D_INFO, "added object %p with handle "LPX64" to hash\n",
+ h, h->h_cookie);
+ EXIT;
}
EXPORT_SYMBOL(class_handle_hash);
CDEBUG(D_INFO, "removing object %p with handle "LPX64" from hash\n",
h, h->h_cookie);
- cfs_spin_lock(&h->h_lock);
- if (h->h_in == 0) {
- cfs_spin_unlock(&h->h_lock);
- return;
- }
- h->h_in = 0;
- cfs_spin_unlock(&h->h_lock);
- list_del_rcu(&h->h_link);
+ spin_lock(&h->h_lock);
+ if (h->h_in == 0) {
+ spin_unlock(&h->h_lock);
+ return;
+ }
+ h->h_in = 0;
+ spin_unlock(&h->h_lock);
+ list_del_rcu(&h->h_link);
}
void class_handle_unhash(struct portals_handle *h)
{
- struct handle_bucket *bucket;
- bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
+ struct handle_bucket *bucket;
+ bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
- cfs_spin_lock(&bucket->lock);
- class_handle_unhash_nolock(h);
- cfs_spin_unlock(&bucket->lock);
+ spin_lock(&bucket->lock);
+ class_handle_unhash_nolock(h);
+ spin_unlock(&bucket->lock);
}
EXPORT_SYMBOL(class_handle_unhash);
void class_handle_hash_back(struct portals_handle *h)
{
- struct handle_bucket *bucket;
- ENTRY;
+ struct handle_bucket *bucket;
+ ENTRY;
- bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
+ bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
- cfs_spin_lock(&bucket->lock);
- list_add_rcu(&h->h_link, &bucket->head);
- h->h_in = 1;
- cfs_spin_unlock(&bucket->lock);
+ spin_lock(&bucket->lock);
+ list_add_rcu(&h->h_link, &bucket->head);
+ h->h_in = 1;
+ spin_unlock(&bucket->lock);
- EXIT;
+ EXIT;
}
EXPORT_SYMBOL(class_handle_hash_back);
if (h->h_cookie != cookie)
continue;
- cfs_spin_lock(&h->h_lock);
- if (likely(h->h_in != 0)) {
+ spin_lock(&h->h_lock);
+ if (likely(h->h_in != 0)) {
h->h_ops->hop_addref(h);
- retval = h;
- }
- cfs_spin_unlock(&h->h_lock);
- break;
- }
- rcu_read_unlock();
-
- RETURN(retval);
+ retval = h;
+ }
+ spin_unlock(&h->h_lock);
+ break;
+ }
+ rcu_read_unlock();
+
+ RETURN(retval);
}
EXPORT_SYMBOL(class_handle2object);
if (handle_hash == NULL)
return -ENOMEM;
- cfs_spin_lock_init(&handle_base_lock);
- for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
- bucket--) {
- CFS_INIT_LIST_HEAD(&bucket->head);
- cfs_spin_lock_init(&bucket->lock);
- }
+ spin_lock_init(&handle_base_lock);
+ for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
+ bucket--) {
+ CFS_INIT_LIST_HEAD(&bucket->head);
+ spin_lock_init(&bucket->lock);
+ }
/** bug 21430: add randomness to the initial base */
cfs_get_random_bytes(seed, sizeof(seed));
static int cleanup_all_handles(void)
{
- int rc;
- int i;
+ int rc;
+ int i;
- for (rc = i = 0; i < HANDLE_HASH_SIZE; i++) {
- struct portals_handle *h;
+ for (rc = i = 0; i < HANDLE_HASH_SIZE; i++) {
+ struct portals_handle *h;
- cfs_spin_lock(&handle_hash[i].lock);
- list_for_each_entry_rcu(h, &(handle_hash[i].head), h_link) {
+ spin_lock(&handle_hash[i].lock);
+ list_for_each_entry_rcu(h, &(handle_hash[i].head), h_link) {
CERROR("force clean handle "LPX64" addr %p ops %p\n",
h->h_cookie, h, h->h_ops);
- class_handle_unhash_nolock(h);
- rc++;
- }
- cfs_spin_unlock(&handle_hash[i].lock);
- }
+ class_handle_unhash_nolock(h);
+ rc++;
+ }
+ spin_unlock(&handle_hash[i].lock);
+ }
- return rc;
+ return rc;
}
void class_handle_cleanup(void)
};
/* FIXME: This should probably become more elegant than a global linked list */
-static cfs_list_t g_uuid_list;
-static cfs_spinlock_t g_uuid_lock;
+static cfs_list_t g_uuid_list;
+static spinlock_t g_uuid_lock;
void class_init_uuidlist(void)
{
- CFS_INIT_LIST_HEAD(&g_uuid_list);
- cfs_spin_lock_init(&g_uuid_lock);
+ CFS_INIT_LIST_HEAD(&g_uuid_list);
+ spin_lock_init(&g_uuid_lock);
}
void class_exit_uuidlist(void)
int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index)
{
- struct uuid_nid_data *data;
- struct obd_uuid tmp;
- int rc = -ENOENT;
-
- obd_str2uuid(&tmp, uuid);
- cfs_spin_lock(&g_uuid_lock);
- cfs_list_for_each_entry(data, &g_uuid_list, un_list) {
- if (obd_uuid_equals(&data->un_uuid, &tmp)) {
- if (index >= data->un_nid_count)
- break;
-
- rc = 0;
- *peer_nid = data->un_nids[index];
- break;
- }
- }
- cfs_spin_unlock(&g_uuid_lock);
- return rc;
+ struct uuid_nid_data *data;
+ struct obd_uuid tmp;
+ int rc = -ENOENT;
+
+ obd_str2uuid(&tmp, uuid);
+ spin_lock(&g_uuid_lock);
+ cfs_list_for_each_entry(data, &g_uuid_list, un_list) {
+ if (obd_uuid_equals(&data->un_uuid, &tmp)) {
+ if (index >= data->un_nid_count)
+ break;
+
+ rc = 0;
+ *peer_nid = data->un_nids[index];
+ break;
+ }
+ }
+ spin_unlock(&g_uuid_lock);
+ return rc;
}
EXPORT_SYMBOL(lustre_uuid_to_peer);
data->un_nids[0] = nid;
data->un_nid_count = 1;
- cfs_spin_lock(&g_uuid_lock);
+ spin_lock(&g_uuid_lock);
cfs_list_for_each_entry(entry, &g_uuid_list, un_list) {
if (obd_uuid_equals(&entry->un_uuid, &data->un_uuid)) {
int i;
}
if (!found)
cfs_list_add(&data->un_list, &g_uuid_list);
- cfs_spin_unlock(&g_uuid_lock);
+ spin_unlock(&g_uuid_lock);
if (found) {
CDEBUG(D_INFO, "found uuid %s %s cnt=%d\n", uuid,
/* Delete the nids for one uuid if specified, otherwise delete all */
int class_del_uuid(const char *uuid)
{
- CFS_LIST_HEAD(deathrow);
- struct uuid_nid_data *data;
+ CFS_LIST_HEAD(deathrow);
+ struct uuid_nid_data *data;
- cfs_spin_lock(&g_uuid_lock);
+ spin_lock(&g_uuid_lock);
if (uuid != NULL) {
struct obd_uuid tmp;
}
} else
cfs_list_splice_init(&g_uuid_list, &deathrow);
- cfs_spin_unlock(&g_uuid_lock);
+ spin_unlock(&g_uuid_lock);
if (uuid != NULL && cfs_list_empty(&deathrow)) {
CDEBUG(D_INFO, "Try to delete a non-existent uuid %s\n", uuid);
CDEBUG(D_INFO, "check if uuid %s has %s.\n",
obd_uuid2str(uuid), libcfs_nid2str(nid));
- cfs_spin_lock(&g_uuid_lock);
+ spin_lock(&g_uuid_lock);
cfs_list_for_each_entry(entry, &g_uuid_list, un_list) {
int i;
}
break;
}
- cfs_spin_unlock (&g_uuid_lock);
- RETURN(found);
+ spin_unlock(&g_uuid_lock);
+ RETURN(found);
}
EXPORT_SYMBOL(class_check_uuid);
static cfs_list_t llo_lobj_list;
/** Lock to protect list manipulations */
-static cfs_mutex_t llo_lock;
+static struct mutex llo_lock;
/**
* Structure used to maintain state of path parsing.
void llo_local_obj_register(struct lu_local_obj_desc *llod)
{
- cfs_mutex_lock(&llo_lock);
+ mutex_lock(&llo_lock);
cfs_list_add_tail(&llod->llod_linkage, &llo_lobj_list);
- cfs_mutex_unlock(&llo_lock);
+ mutex_unlock(&llo_lock);
}
EXPORT_SYMBOL(llo_local_obj_register);
void llo_local_obj_unregister(struct lu_local_obj_desc *llod)
{
- cfs_mutex_lock(&llo_lock);
+ mutex_lock(&llo_lock);
cfs_list_del(&llod->llod_linkage);
- cfs_mutex_unlock(&llo_lock);
+ mutex_unlock(&llo_lock);
}
EXPORT_SYMBOL(llo_local_obj_unregister);
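The cfs_mutex_* renames in these hunks (and the CFS_DEFINE_MUTEX changes further down) follow the same mechanical pattern as the spinlock conversions seen throughout this patch; a minimal sketch of the substitution rules that would produce them (assumed shapes only, not copied from the script, which also drops the matching compatibility #defines):

# mutex (sketch: assumed rule shapes)
s/\bcfs_mutex_t\b/struct mutex/g
s/\bCFS_DEFINE_MUTEX\b/DEFINE_MUTEX/g
s/\bcfs_mutex_init\b/mutex_init/g
s/\bcfs_mutex_lock\b/mutex_lock/g
s/\bcfs_mutex_unlock\b/mutex_unlock/g
s/\bcfs_mutex_trylock\b/mutex_trylock/g
s/\bcfs_mutex_is_locked\b/mutex_is_locked/g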
int rc = 0;
fid = &info->lti_cfid;
- cfs_mutex_lock(&llo_lock);
+ mutex_lock(&llo_lock);
cfs_list_for_each_entry(scan, &llo_lobj_list, llod_linkage) {
lu_local_obj_fid(fid, scan->llod_oid);
}
out:
- cfs_mutex_unlock(&llo_lock);
+ mutex_unlock(&llo_lock);
return rc;
}
int result;
CFS_INIT_LIST_HEAD(&llo_lobj_list);
- cfs_mutex_init(&llo_lock);
+ mutex_init(&llo_lock);
LU_CONTEXT_KEY_INIT(&llod_key);
result = lu_context_key_register(&llod_key);
LASSERTF(strncmp(obd->obd_name, name, strlen(name)) == 0,
"%p obd_name %s != %s\n", obd, obd->obd_name, name);
- cfs_rwlock_init(&obd->obd_pool_lock);
- obd->obd_pool_limit = 0;
- obd->obd_pool_slv = 0;
-
- CFS_INIT_LIST_HEAD(&obd->obd_exports);
- CFS_INIT_LIST_HEAD(&obd->obd_unlinked_exports);
- CFS_INIT_LIST_HEAD(&obd->obd_delayed_exports);
- CFS_INIT_LIST_HEAD(&obd->obd_exports_timed);
- CFS_INIT_LIST_HEAD(&obd->obd_nid_stats);
- cfs_spin_lock_init(&obd->obd_nid_lock);
- cfs_spin_lock_init(&obd->obd_dev_lock);
- cfs_mutex_init(&obd->obd_dev_mutex);
- cfs_spin_lock_init(&obd->obd_osfs_lock);
- /* obd->obd_osfs_age must be set to a value in the distant
- * past to guarantee a fresh statfs is fetched on mount. */
- obd->obd_osfs_age = cfs_time_shift_64(-1000);
-
- /* XXX belongs in setup not attach */
- cfs_init_rwsem(&obd->obd_observer_link_sem);
- /* recovery data */
- cfs_init_timer(&obd->obd_recovery_timer);
- cfs_spin_lock_init(&obd->obd_recovery_task_lock);
+ rwlock_init(&obd->obd_pool_lock);
+ obd->obd_pool_limit = 0;
+ obd->obd_pool_slv = 0;
+
+ CFS_INIT_LIST_HEAD(&obd->obd_exports);
+ CFS_INIT_LIST_HEAD(&obd->obd_unlinked_exports);
+ CFS_INIT_LIST_HEAD(&obd->obd_delayed_exports);
+ CFS_INIT_LIST_HEAD(&obd->obd_exports_timed);
+ CFS_INIT_LIST_HEAD(&obd->obd_nid_stats);
+ spin_lock_init(&obd->obd_nid_lock);
+ spin_lock_init(&obd->obd_dev_lock);
+ mutex_init(&obd->obd_dev_mutex);
+ spin_lock_init(&obd->obd_osfs_lock);
+ /* obd->obd_osfs_age must be set to a value in the distant
+ * past to guarantee a fresh statfs is fetched on mount. */
+ obd->obd_osfs_age = cfs_time_shift_64(-1000);
+
+ /* XXX belongs in setup not attach */
+ init_rwsem(&obd->obd_observer_link_sem);
+ /* recovery data */
+ cfs_init_timer(&obd->obd_recovery_timer);
+ spin_lock_init(&obd->obd_recovery_task_lock);
cfs_waitq_init(&obd->obd_next_transno_waitq);
cfs_waitq_init(&obd->obd_evict_inprogress_waitq);
CFS_INIT_LIST_HEAD(&obd->obd_req_replay_queue);
}
/* Detach drops this */
- cfs_spin_lock(&obd->obd_dev_lock);
- cfs_atomic_set(&obd->obd_refcount, 1);
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_dev_lock);
+ cfs_atomic_set(&obd->obd_refcount, 1);
+ spin_unlock(&obd->obd_dev_lock);
lu_ref_init(&obd->obd_reference);
lu_ref_add(&obd->obd_reference, "attach", obd);
}
/* is someone else setting us up right now? (attach inits spinlock) */
- cfs_spin_lock(&obd->obd_dev_lock);
- if (obd->obd_starting) {
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_dev_lock);
+ if (obd->obd_starting) {
+ spin_unlock(&obd->obd_dev_lock);
CERROR("Device %d setup in progress (type %s)\n",
obd->obd_minor, obd->obd_type->typ_name);
RETURN(-EEXIST);
obd->obd_uuid_hash = NULL;
obd->obd_nid_hash = NULL;
obd->obd_nid_stats_hash = NULL;
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_unlock(&obd->obd_dev_lock);
/* create an uuid-export lustre hash */
obd->obd_uuid_hash = cfs_hash_create("UUID_HASH",
obd->obd_set_up = 1;
- cfs_spin_lock(&obd->obd_dev_lock);
- /* cleanup drops this */
- class_incref(obd, "setup", obd);
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_dev_lock);
+ /* cleanup drops this */
+ class_incref(obd, "setup", obd);
+ spin_unlock(&obd->obd_dev_lock);
CDEBUG(D_IOCTL, "finished setup of obd %s (uuid %s)\n",
obd->obd_name, obd->obd_uuid.uuid);
RETURN(-EBUSY);
}
- cfs_spin_lock(&obd->obd_dev_lock);
- if (!obd->obd_attached) {
- cfs_spin_unlock(&obd->obd_dev_lock);
- CERROR("OBD device %d not attached\n", obd->obd_minor);
- RETURN(-ENODEV);
- }
- obd->obd_attached = 0;
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_dev_lock);
+ if (!obd->obd_attached) {
+ spin_unlock(&obd->obd_dev_lock);
+ CERROR("OBD device %d not attached\n", obd->obd_minor);
+ RETURN(-ENODEV);
+ }
+ obd->obd_attached = 0;
+ spin_unlock(&obd->obd_dev_lock);
CDEBUG(D_IOCTL, "detach on obd %s (uuid %s)\n",
obd->obd_name, obd->obd_uuid.uuid);
RETURN(-ENODEV);
}
- cfs_spin_lock(&obd->obd_dev_lock);
- if (obd->obd_stopping) {
- cfs_spin_unlock(&obd->obd_dev_lock);
- CERROR("OBD %d already stopping\n", obd->obd_minor);
- RETURN(-ENODEV);
- }
- /* Leave this on forever */
- obd->obd_stopping = 1;
+ spin_lock(&obd->obd_dev_lock);
+ if (obd->obd_stopping) {
+ spin_unlock(&obd->obd_dev_lock);
+ CERROR("OBD %d already stopping\n", obd->obd_minor);
+ RETURN(-ENODEV);
+ }
+ /* Leave this on forever */
+ obd->obd_stopping = 1;
/* wait for already-arrived-connections to finish. */
while (obd->obd_conn_inprogress > 0) {
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_unlock(&obd->obd_dev_lock);
cfs_cond_resched();
- cfs_spin_lock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_dev_lock);
}
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_unlock(&obd->obd_dev_lock);
if (lcfg->lcfg_bufcount >= 2 && LUSTRE_CFG_BUFLEN(lcfg, 1) > 0) {
for (flag = lustre_cfg_string(lcfg, 1); *flag != 0; flag++)
void class_decref(struct obd_device *obd, const char *scope, const void *source)
{
- int err;
- int refs;
+ int err;
+ int refs;
- cfs_spin_lock(&obd->obd_dev_lock);
- cfs_atomic_dec(&obd->obd_refcount);
- refs = cfs_atomic_read(&obd->obd_refcount);
- cfs_spin_unlock(&obd->obd_dev_lock);
- lu_ref_del(&obd->obd_reference, scope, source);
+ spin_lock(&obd->obd_dev_lock);
+ cfs_atomic_dec(&obd->obd_refcount);
+ refs = cfs_atomic_read(&obd->obd_refcount);
+ spin_unlock(&obd->obd_dev_lock);
+ lu_ref_del(&obd->obd_reference, scope, source);
- CDEBUG(D_INFO, "Decref %s (%p) now %d\n", obd->obd_name, obd, refs);
+ CDEBUG(D_INFO, "Decref %s (%p) now %d\n", obd->obd_name, obd, refs);
- if ((refs == 1) && obd->obd_stopping) {
- /* All exports have been destroyed; there should
- be no more in-progress ops by this point.*/
+ if ((refs == 1) && obd->obd_stopping) {
+ /* All exports have been destroyed; there should
+ be no more in-progress ops by this point.*/
- cfs_spin_lock(&obd->obd_self_export->exp_lock);
- obd->obd_self_export->exp_flags |= exp_flags_from_obd(obd);
- cfs_spin_unlock(&obd->obd_self_export->exp_lock);
+ spin_lock(&obd->obd_self_export->exp_lock);
+ obd->obd_self_export->exp_flags |= exp_flags_from_obd(obd);
+ spin_unlock(&obd->obd_self_export->exp_lock);
/* note that we'll recurse into class_decref again */
class_unlink_export(obd->obd_self_export);
/*********** mount lookup *********/
-CFS_DEFINE_MUTEX(lustre_mount_info_lock);
+DEFINE_MUTEX(lustre_mount_info_lock);
static CFS_LIST_HEAD(server_mount_info_list);
static struct lustre_mount_info *server_find_mount(const char *name)
}
strcpy(name_cp, name);
- cfs_mutex_lock(&lustre_mount_info_lock);
+ mutex_lock(&lustre_mount_info_lock);
if (server_find_mount(name)) {
- cfs_mutex_unlock(&lustre_mount_info_lock);
+ mutex_unlock(&lustre_mount_info_lock);
OBD_FREE(lmi, sizeof(*lmi));
OBD_FREE(name_cp, strlen(name) + 1);
CERROR("Already registered %s\n", name);
lmi->lmi_mnt = mnt;
cfs_list_add(&lmi->lmi_list_chain, &server_mount_info_list);
- cfs_mutex_unlock(&lustre_mount_info_lock);
+ mutex_unlock(&lustre_mount_info_lock);
CDEBUG(D_MOUNT, "reg_mnt %p from %s\n", lmi->lmi_mnt, name);
struct lustre_mount_info *lmi;
ENTRY;
- cfs_mutex_lock(&lustre_mount_info_lock);
+ mutex_lock(&lustre_mount_info_lock);
lmi = server_find_mount(name);
if (!lmi) {
- cfs_mutex_unlock(&lustre_mount_info_lock);
+ mutex_unlock(&lustre_mount_info_lock);
CERROR("%s not registered\n", name);
RETURN(-ENOENT);
}
OBD_FREE(lmi->lmi_name, strlen(lmi->lmi_name) + 1);
cfs_list_del(&lmi->lmi_list_chain);
OBD_FREE(lmi, sizeof(*lmi));
- cfs_mutex_unlock(&lustre_mount_info_lock);
+ mutex_unlock(&lustre_mount_info_lock);
RETURN(0);
}
struct lustre_sb_info *lsi;
ENTRY;
- cfs_mutex_lock(&lustre_mount_info_lock);
+ mutex_lock(&lustre_mount_info_lock);
lmi = server_find_mount(name);
- cfs_mutex_unlock(&lustre_mount_info_lock);
+ mutex_unlock(&lustre_mount_info_lock);
if (!lmi) {
CERROR("Can't find mount for %s\n", name);
RETURN(NULL);
struct lustre_mount_info *lmi;
ENTRY;
- cfs_mutex_lock(&lustre_mount_info_lock);
+ mutex_lock(&lustre_mount_info_lock);
lmi = server_find_mount(name);
- cfs_mutex_unlock(&lustre_mount_info_lock);
+ mutex_unlock(&lustre_mount_info_lock);
if (!lmi)
CERROR("Can't find mount for %s\n", name);
struct lustre_sb_info *lsi;
ENTRY;
- cfs_mutex_lock(&lustre_mount_info_lock);
+ mutex_lock(&lustre_mount_info_lock);
lmi = server_find_mount(name);
- cfs_mutex_unlock(&lustre_mount_info_lock);
+ mutex_unlock(&lustre_mount_info_lock);
if (!lmi) {
CERROR("Can't find mount for %s\n", name);
RETURN(-ENOENT);
RETURN(rc);
}
-CFS_DEFINE_MUTEX(mgc_start_lock);
+DEFINE_MUTEX(mgc_start_lock);
/** Set up a mgc obd to process startup logs
*
RETURN(-EINVAL);
}
- cfs_mutex_lock(&mgc_start_lock);
+ mutex_lock(&mgc_start_lock);
len = strlen(LUSTRE_MGC_OBDNAME) + strlen(libcfs_nid2str(nid)) + 1;
OBD_ALLOC(mgcname, len);
to the same mgc.*/
lsi->lsi_mgc = obd;
out_free:
- cfs_mutex_unlock(&mgc_start_lock);
+ mutex_unlock(&mgc_start_lock);
if (data)
OBD_FREE_PTR(data);
RETURN(-ENOENT);
lsi->lsi_mgc = NULL;
- cfs_mutex_lock(&mgc_start_lock);
+ mutex_lock(&mgc_start_lock);
LASSERT(cfs_atomic_read(&obd->u.cli.cl_mgc_refcount) > 0);
if (!cfs_atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) {
/* This is not fatal, every client that stops
OBD_FREE(niduuid, len);
/* class_import_put will get rid of the additional connections */
- cfs_mutex_unlock(&mgc_start_lock);
+ mutex_unlock(&mgc_start_lock);
RETURN(rc);
}
EXPORT_SYMBOL(tgt_name2ospname);
static CFS_LIST_HEAD(osp_register_list);
-CFS_DEFINE_MUTEX(osp_register_list_lock);
+DEFINE_MUTEX(osp_register_list_lock);
int lustre_register_osp_item(char *ospname, struct obd_export **exp,
register_osp_cb cb_func, void *cb_data)
if (ori == NULL)
RETURN(-ENOMEM);
- cfs_mutex_lock(&osp_register_list_lock);
+ mutex_lock(&osp_register_list_lock);
osp = class_name2obd(ospname);
if (osp != NULL && osp->obd_set_up == 1) {
OBD_ALLOC_PTR(uuid);
if (uuid == NULL) {
- cfs_mutex_unlock(&osp_register_list_lock);
+ mutex_unlock(&osp_register_list_lock);
RETURN(-ENOMEM);
}
memcpy(uuid->uuid, ospname, strlen(ospname));
if (*exp != NULL && cb_func != NULL)
cb_func(cb_data);
- cfs_mutex_unlock(&osp_register_list_lock);
+ mutex_unlock(&osp_register_list_lock);
RETURN(0);
}
EXPORT_SYMBOL(lustre_register_osp_item);
{
struct osp_register_item *ori, *tmp;
- cfs_mutex_lock(&osp_register_list_lock);
+ mutex_lock(&osp_register_list_lock);
cfs_list_for_each_entry_safe(ori, tmp, &osp_register_list, ori_list) {
if (exp == ori->ori_exp) {
if (*exp)
break;
}
}
- cfs_mutex_unlock(&osp_register_list_lock);
+ mutex_unlock(&osp_register_list_lock);
}
EXPORT_SYMBOL(lustre_deregister_osp_item);
struct osp_register_item *ori, *tmp;
LASSERT(exp != NULL);
- cfs_mutex_lock(&osp_register_list_lock);
+ mutex_lock(&osp_register_list_lock);
cfs_list_for_each_entry_safe(ori, tmp, &osp_register_list, ori_list) {
if (strcmp(exp->exp_obd->obd_name, ori->ori_name))
continue;
if (ori->ori_cb_func != NULL)
ori->ori_cb_func(ori->ori_cb_data);
}
- cfs_mutex_unlock(&osp_register_list_lock);
+ mutex_unlock(&osp_register_list_lock);
}
static int lustre_osp_connect(struct obd_device *osp)
RETURN(rc);
}
-CFS_DEFINE_MUTEX(server_start_lock);
+DEFINE_MUTEX(server_start_lock);
/* Stop MDS/OSS if nobody is using them */
static int server_stop_servers(int lsiflags)
int rc = 0;
ENTRY;
- cfs_mutex_lock(&server_start_lock);
+ mutex_lock(&server_start_lock);
/* Either an MDT or an OST or neither */
/* if this was an MDT, and there are no more MDT's, clean up the MDS */
rc = err;
}
- cfs_mutex_unlock(&server_start_lock);
+ mutex_unlock(&server_start_lock);
RETURN(rc);
}
/* If we're an MDT, make sure the global MDS is running */
if (lsi->lsi_ldd->ldd_flags & LDD_F_SV_TYPE_MDT) {
/* make sure the MDS is started */
- cfs_mutex_lock(&server_start_lock);
+ mutex_lock(&server_start_lock);
obd = class_name2obd(LUSTRE_MDS_OBDNAME);
if (!obd) {
rc = lustre_start_simple(LUSTRE_MDS_OBDNAME,
LUSTRE_MDS_OBDNAME"_uuid",
0, 0);
if (rc) {
- cfs_mutex_unlock(&server_start_lock);
+ mutex_unlock(&server_start_lock);
CERROR("failed to start MDS: %d\n", rc);
RETURN(rc);
}
}
- cfs_mutex_unlock(&server_start_lock);
+ mutex_unlock(&server_start_lock);
}
#endif
/* If we're an OST, make sure the global OSS is running */
if (IS_OST(lsi)) {
/* make sure OSS is started */
- cfs_mutex_lock(&server_start_lock);
+ mutex_lock(&server_start_lock);
obd = class_name2obd(LUSTRE_OSS_OBDNAME);
if (!obd) {
rc = lustre_start_simple(LUSTRE_OSS_OBDNAME,
LUSTRE_OSS_OBDNAME"_uuid",
0, 0, 0, 0);
if (rc) {
- cfs_mutex_unlock(&server_start_lock);
+ mutex_unlock(&server_start_lock);
CERROR("failed to start OSS: %d\n", rc);
RETURN(rc);
}
}
- cfs_mutex_unlock(&server_start_lock);
+ mutex_unlock(&server_start_lock);
}
/* Set the mgc fs to our server disk. This allows the MGC to
.statfs = server_statfs,
};
-#define log2(n) cfs_ffz(~(n))
+#define log2(n) ffz(~(n))
#define LUSTRE_SUPER_MAGIC 0x0BD00BD1
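The log2() rewrite just above, and the cfs_set_bit/cfs_test_bit/cfs_clear_bit hunks later in this patch, come from the bit-operation renames; a sketch of the rules presumably behind them:

# bit operations (sketch: assumed rule shapes, not the verbatim script)
s/\bcfs_set_bit\b/set_bit/g
s/\bcfs_clear_bit\b/clear_bit/g
s/\bcfs_test_bit\b/test_bit/g
s/\bcfs_fls\b/fls/g
s/\bcfs_ffz\b/ffz/g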
static int server_fill_super_common(struct super_block *sb)
* Disable lockdep during mount, because mount locking patterns are
* `special'.
*/
- cfs_lockdep_off();
+ lockdep_off();
/*
* LU-639: the obd cleanup of last mount may not finish yet, wait here.
CDEBUG(D_SUPER, "Mount %s complete\n",
lmd->lmd_dev);
}
- cfs_lockdep_on();
+ lockdep_on();
return rc;
}
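The lockdep wrappers lose their cfs_ prefix the same way; a plausible sketch of the rules behind this hunk and the cfs_lock_class_key_t change further down (assumptions, not quoted from the script):

# lockdep (sketch: assumed rule shapes)
s/\bcfs_lockdep_off\b/lockdep_off/g
s/\bcfs_lockdep_on\b/lockdep_on/g
s/\bcfs_lock_class_key_t\b/struct lock_class_key/g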
static __u64 echo_next_id(struct obd_device *obddev)
{
- obd_id id;
+ obd_id id;
- cfs_spin_lock(&obddev->u.echo.eo_lock);
- id = ++obddev->u.echo.eo_lastino;
- cfs_spin_unlock(&obddev->u.echo.eo_lock);
+ spin_lock(&obddev->u.echo.eo_lock);
+ id = ++obddev->u.echo.eo_lastino;
+ spin_unlock(&obddev->u.echo.eo_lock);
- return id;
+ return id;
}
static int echo_create(const struct lu_env *env, struct obd_export *exp,
ENTRY;
obd->u.echo.eo_obt.obt_magic = OBT_MAGIC;
- cfs_spin_lock_init(&obd->u.echo.eo_lock);
+ spin_lock_init(&obd->u.echo.eo_lock);
obd->u.echo.eo_lastino = ECHO_INIT_OID;
sprintf(ns_name, "echotgt-%s", obd->obd_uuid.uuid);
struct echo_page {
struct cl_page_slice ep_cl;
- cfs_mutex_t ep_lock;
+ struct mutex ep_lock;
cfs_page_t *ep_vmpage;
};
struct echo_page *ep = cl2echo_page(slice);
if (!nonblock)
- cfs_mutex_lock(&ep->ep_lock);
- else if (!cfs_mutex_trylock(&ep->ep_lock))
+ mutex_lock(&ep->ep_lock);
+ else if (!mutex_trylock(&ep->ep_lock))
return -EAGAIN;
return 0;
}
{
struct echo_page *ep = cl2echo_page(slice);
- LASSERT(cfs_mutex_is_locked(&ep->ep_lock));
- cfs_mutex_unlock(&ep->ep_lock);
+ LASSERT(mutex_is_locked(&ep->ep_lock));
+ mutex_unlock(&ep->ep_lock);
}
static void echo_page_discard(const struct lu_env *env,
static int echo_page_is_vmlocked(const struct lu_env *env,
const struct cl_page_slice *slice)
{
- if (cfs_mutex_is_locked(&cl2echo_page(slice)->ep_lock))
+ if (mutex_is_locked(&cl2echo_page(slice)->ep_lock))
return -EBUSY;
return -ENODATA;
}
struct echo_page *ep = cl2echo_page(slice);
(*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p %d vm@%p\n",
- ep, cfs_mutex_is_locked(&ep->ep_lock), ep->ep_vmpage);
+ ep, mutex_is_locked(&ep->ep_lock), ep->ep_vmpage);
return 0;
}
struct echo_object *eco = cl2echo_obj(obj);
ep->ep_vmpage = vmpage;
page_cache_get(vmpage);
- cfs_mutex_init(&ep->ep_lock);
+ mutex_init(&ep->ep_lock);
cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
cfs_atomic_inc(&eco->eo_npages);
}
eco->eo_dev = ed;
cfs_atomic_set(&eco->eo_npages, 0);
- cfs_spin_lock(&ec->ec_lock);
- cfs_list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
- cfs_spin_unlock(&ec->ec_lock);
+ spin_lock(&ec->ec_lock);
+ cfs_list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
+ spin_unlock(&ec->ec_lock);
- RETURN(0);
+ RETURN(0);
}
/* taken from osc_unpackmd() */
LASSERT(cfs_atomic_read(&eco->eo_npages) == 0);
- cfs_spin_lock(&ec->ec_lock);
+ spin_lock(&ec->ec_lock);
cfs_list_del_init(&eco->eo_obj_chain);
- cfs_spin_unlock(&ec->ec_lock);
+ spin_unlock(&ec->ec_lock);
lu_object_fini(obj);
lu_object_header_fini(obj->lo_header);
ls = next->ld_site;
- cfs_spin_lock(&ls->ls_ld_lock);
- cfs_list_for_each_entry(ld, &ls->ls_ld_linkage, ld_linkage) {
- if (strcmp(ld->ld_type->ldt_name, tgt_type_name) == 0) {
- found = 1;
- break;
- }
- }
- cfs_spin_unlock(&ls->ls_ld_lock);
+ spin_lock(&ls->ls_ld_lock);
+ cfs_list_for_each_entry(ld, &ls->ls_ld_linkage, ld_linkage) {
+ if (strcmp(ld->ld_type->ldt_name, tgt_type_name) == 0) {
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock(&ls->ls_ld_lock);
if (found == 0) {
CERROR("%s is not lu device type!\n",
 * all of the cached objects. Anyway, probably the echo device is being
 * accessed in parallel.
*/
- cfs_spin_lock(&ec->ec_lock);
- cfs_list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
- eco->eo_deleted = 1;
- cfs_spin_unlock(&ec->ec_lock);
-
- /* purge again */
- lu_site_purge(env, &ed->ed_site->cs_lu, -1);
-
- CDEBUG(D_INFO,
- "Waiting for the reference of echo object to be dropped\n");
-
- /* Wait for the last reference to be dropped. */
- cfs_spin_lock(&ec->ec_lock);
- while (!cfs_list_empty(&ec->ec_objects)) {
- cfs_spin_unlock(&ec->ec_lock);
- CERROR("echo_client still has objects at cleanup time, "
- "wait for 1 second\n");
- cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
- cfs_time_seconds(1));
- lu_site_purge(env, &ed->ed_site->cs_lu, -1);
- cfs_spin_lock(&ec->ec_lock);
- }
- cfs_spin_unlock(&ec->ec_lock);
+ spin_lock(&ec->ec_lock);
+ cfs_list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
+ eco->eo_deleted = 1;
+ spin_unlock(&ec->ec_lock);
+
+ /* purge again */
+ lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+
+ CDEBUG(D_INFO,
+ "Waiting for the reference of echo object to be dropped\n");
+
+ /* Wait for the last reference to be dropped. */
+ spin_lock(&ec->ec_lock);
+ while (!cfs_list_empty(&ec->ec_objects)) {
+ spin_unlock(&ec->ec_lock);
+ CERROR("echo_client still has objects at cleanup time, "
+ "wait for 1 second\n");
+ cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
+ cfs_time_seconds(1));
+ lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+ spin_lock(&ec->ec_lock);
+ }
+ spin_unlock(&ec->ec_lock);
LASSERT(cfs_list_empty(&ec->ec_locks));
if (eco->eo_deleted) {
struct lu_object_header *loh = obj->co_lu.lo_header;
LASSERT(&eco->eo_hdr == luh2coh(loh));
- cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags);
+ set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags);
}
cl_object_put(env, obj);
rc = cl_wait(env, lck);
if (rc == 0) {
el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
- cfs_spin_lock(&ec->ec_lock);
- if (cfs_list_empty(&el->el_chain)) {
- cfs_list_add(&el->el_chain, &ec->ec_locks);
- el->el_cookie = ++ec->ec_unique;
- }
- cfs_atomic_inc(&el->el_refcount);
- *cookie = el->el_cookie;
- cfs_spin_unlock(&ec->ec_lock);
- } else
- cl_lock_release(env, lck, "ec enqueue", cfs_current());
- }
- RETURN(rc);
+ spin_lock(&ec->ec_lock);
+ if (cfs_list_empty(&el->el_chain)) {
+ cfs_list_add(&el->el_chain, &ec->ec_locks);
+ el->el_cookie = ++ec->ec_unique;
+ }
+ cfs_atomic_inc(&el->el_refcount);
+ *cookie = el->el_cookie;
+ spin_unlock(&ec->ec_lock);
+ } else {
+ cl_lock_release(env, lck, "ec enqueue", cfs_current());
+ }
+ }
+ RETURN(rc);
}
static int cl_echo_enqueue(struct echo_object *eco, obd_off start, obd_off end,
ENTRY;
LASSERT(ec != NULL);
- cfs_spin_lock (&ec->ec_lock);
+ spin_lock(&ec->ec_lock);
cfs_list_for_each (el, &ec->ec_locks) {
ecl = cfs_list_entry (el, struct echo_lock, el_chain);
CDEBUG(D_INFO, "ecl: %p, cookie: "LPX64"\n", ecl, ecl->el_cookie);
break;
}
}
- cfs_spin_unlock (&ec->ec_lock);
+ spin_unlock(&ec->ec_lock);
if (!found)
RETURN(-ENOENT);
RETURN(-EINVAL);
}
- cfs_spin_lock_init (&ec->ec_lock);
+ spin_lock_init(&ec->ec_lock);
CFS_INIT_LIST_HEAD (&ec->ec_objects);
CFS_INIT_LIST_HEAD (&ec->ec_locks);
ec->ec_unique = 0;
rc = obd_connect(env, &ec->ec_exp, tgt, &echo_uuid, ocd, NULL);
if (rc == 0) {
/* Turn off pinger because it connects to tgt obd directly. */
- cfs_spin_lock(&tgt->obd_dev_lock);
- cfs_list_del_init(&ec->ec_exp->exp_obd_chain_timed);
- cfs_spin_unlock(&tgt->obd_dev_lock);
+ spin_lock(&tgt->obd_dev_lock);
+ cfs_list_del_init(&ec->ec_exp->exp_obd_chain_timed);
+ spin_unlock(&tgt->obd_dev_lock);
}
OBD_FREE(ocd, sizeof(*ocd));
"a huge part of the free space is now reserved for "
"grants\n", obd->obd_name);
- cfs_spin_lock(&ofd->ofd_grant_lock);
+ spin_lock(&ofd->ofd_grant_lock);
ofd->ofd_grant_ratio = ofd_grant_ratio_conv(val);
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
return count;
}
if (val < 1)
return -EINVAL;
- cfs_spin_lock(&ofd->ofd_objid_lock);
+ spin_lock(&ofd->ofd_objid_lock);
ofd->ofd_precreate_batch = val;
- cfs_spin_unlock(&ofd->ofd_objid_lock);
+ spin_unlock(&ofd->ofd_objid_lock);
return count;
}
if (rc)
return rc;
- cfs_spin_lock(&ofd->ofd_flags_lock);
+ spin_lock(&ofd->ofd_flags_lock);
ofd->ofd_raid_degraded = !!val;
- cfs_spin_unlock(&ofd->ofd_flags_lock);
+ spin_unlock(&ofd->ofd_flags_lock);
return count;
}
if (val < 0)
return -EINVAL;
- cfs_spin_lock(&ofd->ofd_flags_lock);
+ spin_lock(&ofd->ofd_flags_lock);
ofd->ofd_syncjournal = !!val;
ofd_slc_set(ofd);
- cfs_spin_unlock(&ofd->ofd_flags_lock);
+ spin_unlock(&ofd->ofd_flags_lock);
return count;
}
if (val < 0 || val > 2)
return -EINVAL;
- cfs_spin_lock(&ofd->ofd_flags_lock);
+ spin_lock(&ofd->ofd_flags_lock);
ofd->ofd_sync_lock_cancel = val;
- cfs_spin_unlock(&ofd->ofd_flags_lock);
+ spin_unlock(&ofd->ofd_flags_lock);
return count;
}
if (val < 0)
return -EINVAL;
- cfs_spin_lock(&ofd->ofd_flags_lock);
+ spin_lock(&ofd->ofd_flags_lock);
ofd->ofd_grant_compat_disable = !!val;
- cfs_spin_unlock(&ofd->ofd_flags_lock);
+ spin_unlock(&ofd->ofd_flags_lock);
return count;
}
struct filter_capa_key *k, *keys[2] = { NULL, NULL };
int i;
- cfs_spin_lock(&capa_lock);
+ spin_lock(&capa_lock);
cfs_list_for_each_entry(k, &obd->u.filter.fo_capa_keys, k_list) {
if (k->k_key.lk_seq != new->lk_seq)
continue;
keys[0] = k;
}
}
- cfs_spin_unlock(&capa_lock);
+ spin_unlock(&capa_lock);
for (i = 0; i < 2; i++) {
if (!keys[i])
		/* maybe because of recovery or other reasons, MDS sent the
		 * old capability key again.
*/
- cfs_spin_lock(&capa_lock);
+ spin_lock(&capa_lock);
keys[i]->k_key = *new;
- cfs_spin_unlock(&capa_lock);
+ spin_unlock(&capa_lock);
RETURN(0);
}
CFS_INIT_LIST_HEAD(&k->k_list);
}
- cfs_spin_lock(&capa_lock);
+ spin_lock(&capa_lock);
k->k_key = *new;
if (cfs_list_empty(&k->k_list))
cfs_list_add(&k->k_list, &obd->u.filter.fo_capa_keys);
- cfs_spin_unlock(&capa_lock);
+ spin_unlock(&capa_lock);
DEBUG_CAPA_KEY(D_SEC, new, "new");
RETURN(0);
oc = capa_lookup(filter->fo_capa_hash, capa, 0);
if (oc) {
- cfs_spin_lock(&oc->c_lock);
+ spin_lock(&oc->c_lock);
if (capa_is_expired(oc)) {
DEBUG_CAPA(D_ERROR, capa, "expired");
rc = -ESTALE;
}
- cfs_spin_unlock(&oc->c_lock);
+ spin_unlock(&oc->c_lock);
capa_put(oc);
RETURN(rc);
RETURN(-ESTALE);
}
- cfs_spin_lock(&capa_lock);
+ spin_lock(&capa_lock);
cfs_list_for_each_entry(k, &filter->fo_capa_keys, k_list) {
if (k->k_key.lk_seq == seq) {
keys_ready = 1;
}
}
}
- cfs_spin_unlock(&capa_lock);
+ spin_unlock(&capa_lock);
if (!keys_ready) {
CDEBUG(D_SEC, "MDS hasn't propagated capability keys yet, "
struct obd_device *obd = ofd_obd(ofd);
struct filter_capa_key *key, *n;
- cfs_spin_lock(&capa_lock);
+ spin_lock(&capa_lock);
cfs_list_for_each_entry_safe(key, n, &obd->u.filter.fo_capa_keys, k_list) {
cfs_list_del_init(&key->k_list);
OBD_FREE_PTR(key);
}
- cfs_spin_unlock(&capa_lock);
+ spin_unlock(&capa_lock);
}
m->ofd_fmd_max_num = OFD_FMD_MAX_NUM_DEFAULT;
m->ofd_fmd_max_age = OFD_FMD_MAX_AGE_DEFAULT;
- cfs_spin_lock_init(&m->ofd_flags_lock);
+ spin_lock_init(&m->ofd_flags_lock);
m->ofd_raid_degraded = 0;
m->ofd_syncjournal = 0;
ofd_slc_set(m);
m->ofd_grant_compat_disable = 0;
/* statfs data */
- cfs_spin_lock_init(&m->ofd_osfs_lock);
+ spin_lock_init(&m->ofd_osfs_lock);
m->ofd_osfs_age = cfs_time_shift_64(-1000);
m->ofd_osfs_unstable = 0;
m->ofd_statfs_inflight = 0;
m->ofd_osfs_inflight = 0;
/* grant data */
- cfs_spin_lock_init(&m->ofd_grant_lock);
+ spin_lock_init(&m->ofd_grant_lock);
m->ofd_tot_dirty = 0;
m->ofd_tot_granted = 0;
m->ofd_tot_pending = 0;
m->ofd_max_group = 0;
- cfs_rwlock_init(&obd->u.filter.fo_sptlrpc_lock);
+ rwlock_init(&obd->u.filter.fo_sptlrpc_lock);
sptlrpc_rule_set_init(&obd->u.filter.fo_sptlrpc_rset);
obd->u.filter.fo_fl_oss_capa = 0;
obd->obd_name, osfs->os_bsize);
GOTO(err_fini_stack, rc = -EPROTO);
}
- m->ofd_blockbits = cfs_fls(osfs->os_bsize) - 1;
+ m->ofd_blockbits = fls(osfs->os_bsize) - 1;
snprintf(info->fti_u.name, sizeof(info->fti_u.name), "filter-%p", m);
m->ofd_namespace = ldlm_namespace_new(obd, info->fti_u.name,
if (fmd == NULL)
return;
- cfs_spin_lock(&fed->fed_lock);
+ spin_lock(&fed->fed_lock);
ofd_fmd_put_nolock(exp, fmd); /* caller reference */
- cfs_spin_unlock(&fed->fed_lock);
+ spin_unlock(&fed->fed_lock);
}
/* expire entries from the end of the list if there are too many
{
struct filter_export_data *fed = &exp->exp_filter_data;
- cfs_spin_lock(&fed->fed_lock);
+ spin_lock(&fed->fed_lock);
ofd_fmd_expire_nolock(exp, NULL);
- cfs_spin_unlock(&fed->fed_lock);
+ spin_unlock(&fed->fed_lock);
}
/* find specified fid in fed_fmd_list.
struct filter_export_data *fed = &exp->exp_filter_data;
struct ofd_mod_data *fmd;
- cfs_spin_lock(&fed->fed_lock);
+ spin_lock(&fed->fed_lock);
fmd = ofd_fmd_find_nolock(exp, fid);
if (fmd)
fmd->fmd_refcount++; /* caller reference */
- cfs_spin_unlock(&fed->fed_lock);
+ spin_unlock(&fed->fed_lock);
return fmd;
}
OBD_SLAB_ALLOC_PTR(fmd_new, ll_fmd_cachep);
- cfs_spin_lock(&fed->fed_lock);
+ spin_lock(&fed->fed_lock);
found = ofd_fmd_find_nolock(exp, fid);
if (fmd_new) {
if (found == NULL) {
found->fmd_expire = cfs_time_add(now, ofd->ofd_fmd_max_age);
}
- cfs_spin_unlock(&fed->fed_lock);
+ spin_unlock(&fed->fed_lock);
return found;
}
struct filter_export_data *fed = &exp->exp_filter_data;
struct ofd_mod_data *found = NULL;
- cfs_spin_lock(&fed->fed_lock);
+ spin_lock(&fed->fed_lock);
found = ofd_fmd_find_nolock(exp, fid);
if (found) {
cfs_list_del_init(&found->fmd_list);
ofd_fmd_put_nolock(exp, found);
}
- cfs_spin_unlock(&fed->fed_lock);
+ spin_unlock(&fed->fed_lock);
}
#endif
struct filter_export_data *fed = &exp->exp_filter_data;
struct ofd_mod_data *fmd = NULL, *tmp;
- cfs_spin_lock(&fed->fed_lock);
+ spin_lock(&fed->fed_lock);
cfs_list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
cfs_list_del_init(&fmd->fmd_list);
if (fmd->fmd_refcount > 1) {
}
ofd_fmd_put_nolock(exp, fmd);
}
- cfs_spin_unlock(&fed->fed_lock);
+ spin_unlock(&fed->fed_lock);
}
int ofd_fmd_init(void)
{
int count;
- cfs_spin_lock(&ofd->ofd_objid_lock);
+ spin_lock(&ofd->ofd_objid_lock);
count = min(ofd->ofd_precreate_batch, batch);
- cfs_spin_unlock(&ofd->ofd_objid_lock);
+ spin_unlock(&ofd->ofd_objid_lock);
return count;
}
LASSERT(group <= ofd->ofd_max_group);
- cfs_spin_lock(&ofd->ofd_objid_lock);
+ spin_lock(&ofd->ofd_objid_lock);
id = ofd->ofd_last_objids[group];
- cfs_spin_unlock(&ofd->ofd_objid_lock);
+ spin_unlock(&ofd->ofd_objid_lock);
return id;
}
void ofd_last_id_set(struct ofd_device *ofd, obd_id id, obd_seq group)
{
LASSERT(group <= ofd->ofd_max_group);
- cfs_spin_lock(&ofd->ofd_objid_lock);
+ spin_lock(&ofd->ofd_objid_lock);
if (ofd->ofd_last_objids[group] < id)
ofd->ofd_last_objids[group] = id;
- cfs_spin_unlock(&ofd->ofd_objid_lock);
+ spin_unlock(&ofd->ofd_objid_lock);
}
int ofd_last_id_write(const struct lu_env *env, struct ofd_device *ofd,
RETURN(PTR_ERR(dob));
ofd->ofd_lastid_obj[group] = dob;
- cfs_mutex_init(&ofd->ofd_create_locks[group]);
+ mutex_init(&ofd->ofd_create_locks[group]);
rc = dt_attr_get(env, dob, &info->fti_attr, BYPASS_CAPA);
if (rc)
ENTRY;
- cfs_spin_lock_init(&ofd->ofd_objid_lock);
+ spin_lock_init(&ofd->ofd_objid_lock);
rc = dt_attr_get(env, ofd->ofd_last_group_file,
&info->fti_attr, BYPASS_CAPA);
LASSERTF(rc == 0, "rc = %d\n", rc); /* can't fail existing */
/* VBR: set export last committed version */
exp->exp_last_committed = last_rcvd;
- cfs_spin_lock(&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
exp->exp_connecting = 0;
exp->exp_in_recovery = 0;
- cfs_spin_unlock(&exp->exp_lock);
+ spin_unlock(&exp->exp_lock);
obd->obd_max_recoverable_clients++;
class_export_put(exp);
CDEBUG(D_OTHER, "client at idx %d has last_rcvd = "LPU64"\n",
cl_idx, last_rcvd);
- cfs_spin_lock(&ofd->ofd_lut.lut_translock);
+ spin_lock(&ofd->ofd_lut.lut_translock);
if (last_rcvd > lsd->lsd_last_transno)
lsd->lsd_last_transno = last_rcvd;
- cfs_spin_unlock(&ofd->ofd_lut.lut_translock);
+ spin_unlock(&ofd->ofd_lut.lut_translock);
}
err_out:
rc = ofd_clients_data_init(env, ofd, last_rcvd_size);
- cfs_spin_lock(&ofd->ofd_lut.lut_translock);
+ spin_lock(&ofd->ofd_lut.lut_translock);
obd->obd_last_committed = lsd->lsd_last_transno;
ofd->ofd_lut.lut_last_transno = lsd->lsd_last_transno;
- cfs_spin_unlock(&ofd->ofd_lut.lut_translock);
+ spin_unlock(&ofd->ofd_lut.lut_translock);
	/* save it, so mount count and last_transno are current */
rc = tgt_server_data_update(env, &ofd->ofd_lut, 0);
maxsize = ofd->ofd_osfs.os_blocks << ofd->ofd_blockbits;
- cfs_spin_lock(&obd->obd_dev_lock);
- cfs_spin_lock(&ofd->ofd_grant_lock);
+ spin_lock(&obd->obd_dev_lock);
+ spin_lock(&ofd->ofd_grant_lock);
cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
int error = 0;
" > maxsize("LPU64")\n", obd->obd_name,
exp->exp_client_uuid.uuid, exp, fed->fed_grant,
fed->fed_pending, maxsize);
- cfs_spin_unlock(&obd->obd_dev_lock);
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&obd->obd_dev_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
LBUG();
}
if (fed->fed_dirty > maxsize) {
CERROR("%s: cli %s/%p fed_dirty(%ld) > maxsize("LPU64
")\n", obd->obd_name, exp->exp_client_uuid.uuid,
exp, fed->fed_dirty, maxsize);
- cfs_spin_unlock(&obd->obd_dev_lock);
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&obd->obd_dev_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
LBUG();
}
CDEBUG_LIMIT(error ? D_ERROR : D_CACHE, "%s: cli %s/%p dirty "
tot_pending += fed->fed_pending;
tot_dirty += fed->fed_dirty;
}
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_unlock(&obd->obd_dev_lock);
fo_tot_granted = ofd->ofd_tot_granted;
fo_tot_pending = ofd->ofd_tot_pending;
fo_tot_dirty = ofd->ofd_tot_dirty;
if (tot_dirty > maxsize)
CERROR("%s: tot_dirty "LPU64" > maxsize "LPU64"\n",
func, tot_dirty, maxsize);
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
}
/**
ENTRY;
LASSERT_SPIN_LOCKED(&ofd->ofd_grant_lock);
- cfs_spin_lock(&ofd->ofd_osfs_lock);
+ spin_lock(&ofd->ofd_osfs_lock);
/* get available space from cached statfs data */
left = ofd->ofd_osfs.os_bavail << ofd->ofd_blockbits;
unstable = ofd->ofd_osfs_unstable; /* those might be accounted twice */
- cfs_spin_unlock(&ofd->ofd_osfs_lock);
+ spin_unlock(&ofd->ofd_osfs_lock);
tot_granted = ofd->ofd_tot_granted;
CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
obd->obd_name, exp->exp_client_uuid.uuid, exp,
fed->fed_dirty, fed->fed_pending, fed->fed_grant);
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
LBUG();
}
EXIT;
CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
obd->obd_name, exp->exp_client_uuid.uuid, exp,
fed->fed_dirty, fed->fed_pending, fed->fed_grant);
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
LBUG();
}
EXIT;
CERROR("%s: cli %s/%p grant %ld want "LPU64" current "LPU64"\n",
obd->obd_name, exp->exp_client_uuid.uuid, exp,
fed->fed_grant, want, curgrant);
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
LBUG();
}
refresh:
ofd_grant_statfs(env, exp, force, &from_cache);
- cfs_spin_lock(&ofd->ofd_grant_lock);
+ spin_lock(&ofd->ofd_grant_lock);
/* Grab free space from cached info and take out space already granted
* to clients as well as reserved space */
/* get fresh statfs data if we are short in ungranted space */
if (from_cache && left < 32 * ofd_grant_chunk(exp, ofd)) {
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
CDEBUG(D_CACHE, "fs has no space left and statfs too old\n");
force = 1;
goto refresh;
grant = ofd_grant_to_cli(exp, ofd, (obd_size)fed->fed_grant);
ofd->ofd_tot_granted_clients++;
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
CDEBUG(D_CACHE, "%s: cli %s/%p ocd_grant: %ld want: "LPU64" left: "
LPU64"\n", exp->exp_obd->obd_name, exp->exp_client_uuid.uuid,
struct ofd_device *ofd = ofd_exp(exp);
struct filter_export_data *fed = &exp->exp_filter_data;
- cfs_spin_lock(&ofd->ofd_grant_lock);
+ spin_lock(&ofd->ofd_grant_lock);
LASSERTF(ofd->ofd_tot_granted >= fed->fed_grant,
"%s: tot_granted "LPU64" cli %s/%p fed_grant %ld\n",
obd->obd_name, ofd->ofd_tot_granted,
exp->exp_client_uuid.uuid, exp, fed->fed_dirty);
ofd->ofd_tot_dirty -= fed->fed_dirty;
fed->fed_dirty = 0;
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
}
/**
ofd_grant_statfs(env, exp, 1, NULL);
/* protect all grant counters */
- cfs_spin_lock(&ofd->ofd_grant_lock);
+ spin_lock(&ofd->ofd_grant_lock);
/* Grab free space from cached statfs data and take out space
* already granted to clients as well as reserved space */
* since we don't grant space back on reads, no point
* in running statfs, so just skip it and process
* incoming grant data directly. */
- cfs_spin_lock(&ofd->ofd_grant_lock);
+ spin_lock(&ofd->ofd_grant_lock);
do_shrink = 0;
}
else
oa->o_grant = 0;
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
}
/**
/* get statfs information from OSD layer */
ofd_grant_statfs(env, exp, force, &from_cache);
- cfs_spin_lock(&ofd->ofd_grant_lock); /* protect all grant counters */
+ spin_lock(&ofd->ofd_grant_lock); /* protect all grant counters */
/* Grab free space from cached statfs data and take out space already
* granted to clients as well as reserved space */
/* Get fresh statfs data if we are short in ungranted space */
if (from_cache && left < 32 * ofd_grant_chunk(exp, ofd)) {
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
CDEBUG(D_CACHE, "%s: fs has no space left and statfs too old\n",
obd->obd_name);
force = 1;
if (!from_grant) {
/* at least one network buffer requires acquiring grant
* space on the server */
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
/* discard errors, at least we tried ... */
rc = dt_sync(env, ofd->ofd_osd);
force = 2;
ofd_grant_check(env, exp, oa, rnb, niocount, &left);
if (!(oa->o_valid & OBD_MD_FLGRANT)) {
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
RETURN_EXIT;
}
else
/* grant more space back to the client if possible */
oa->o_grant = ofd_grant(exp, oa->o_grant, oa->o_undirty, left);
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
}
/**
ofd_grant_statfs(env, exp, 1, NULL);
/* protect all grant counters */
- cfs_spin_lock(&ofd->ofd_grant_lock);
+ spin_lock(&ofd->ofd_grant_lock);
/* fail precreate request if there is not enough blocks available for
* writing */
if (ofd->ofd_osfs.os_bavail - (fed->fed_grant >> ofd->ofd_blockbits) <
(ofd->ofd_osfs.os_blocks >> 10)) {
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
CDEBUG(D_RPCTRACE, "%s: not enough space for create "LPU64"\n",
ofd_obd(ofd)->obd_name,
ofd->ofd_osfs.os_bavail * ofd->ofd_osfs.os_blocks);
if (*nr == 0) {
/* we really have no space any more for precreation,
* fail the precreate request with ENOSPC */
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
RETURN(-ENOSPC);
}
/* compute space needed for the new number of creations */
/* grant more space (twice as much as needed for this request) for
* precreate purpose if possible */
ofd_grant(exp, fed->fed_grant, wanted * 2, left);
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
RETURN(0);
}
if (pending == 0)
RETURN_EXIT;
- cfs_spin_lock(&ofd->ofd_grant_lock);
+ spin_lock(&ofd->ofd_grant_lock);
/* Don't update statfs data for errors raised before commit (e.g.
* bulk transfer failed, ...) since we know those writes have not been
* processed. For other errors hit during commit, we cannot really tell
* In any case, this should not be fatal since we always get fresh
* statfs data before failing a request with ENOSPC */
if (rc == 0) {
- cfs_spin_lock(&ofd->ofd_osfs_lock);
+ spin_lock(&ofd->ofd_osfs_lock);
/* Take pending out of cached statfs data */
ofd->ofd_osfs.os_bavail -= min_t(obd_size,
ofd->ofd_osfs.os_bavail,
		/* someone is running statfs and wants to be notified of
* writes happening meanwhile */
ofd->ofd_osfs_inflight += pending;
- cfs_spin_unlock(&ofd->ofd_osfs_lock);
+ spin_unlock(&ofd->ofd_osfs_lock);
}
if (exp->exp_filter_data.fed_pending < pending) {
CERROR("%s: cli %s/%p fed_pending(%lu) < grant_used(%lu)\n",
exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
exp->exp_filter_data.fed_pending, pending);
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
LBUG();
}
exp->exp_filter_data.fed_pending -= pending;
"\n", exp->exp_obd->obd_name,
exp->exp_client_uuid.uuid, exp, ofd->ofd_tot_granted,
pending);
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
LBUG();
}
ofd->ofd_tot_granted -= pending;
CERROR("%s: cli %s/%p tot_pending("LPU64") < grant_used(%lu)"
"\n", exp->exp_obd->obd_name, exp->exp_client_uuid.uuid,
exp, ofd->ofd_tot_pending, pending);
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
LBUG();
}
ofd->ofd_tot_pending -= pending;
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
EXIT;
}
int ofd_max_group;
obd_id ofd_last_objids[OFD_MAX_GROUPS];
- cfs_mutex_t ofd_create_locks[OFD_MAX_GROUPS];
+ struct mutex ofd_create_locks[OFD_MAX_GROUPS];
struct dt_object *ofd_lastid_obj[OFD_MAX_GROUPS];
- cfs_spinlock_t ofd_objid_lock;
+ spinlock_t ofd_objid_lock;
unsigned long ofd_destroys_in_progress;
int ofd_precreate_batch;
/* protect all statfs-related counters */
- cfs_spinlock_t ofd_osfs_lock;
+ spinlock_t ofd_osfs_lock;
/* statfs optimization: we cache a bit */
struct obd_statfs ofd_osfs;
__u64 ofd_osfs_age;
/* grants: all values in bytes */
/* grant lock to protect all grant counters */
- cfs_spinlock_t ofd_grant_lock;
+ spinlock_t ofd_grant_lock;
/* total amount of dirty data reported by clients in incoming obdo */
obd_size ofd_tot_dirty;
/* sum of filesystem space granted to clients for async writes */
int ofd_fmd_max_num; /* per ofd ofd_mod_data */
cfs_duration_t ofd_fmd_max_age; /* time to fmd expiry */
- cfs_spinlock_t ofd_flags_lock;
+ spinlock_t ofd_flags_lock;
unsigned long ofd_raid_degraded:1,
/* sync journal on writes */
ofd_syncjournal:1,
GOTO(clean, rc = -ENOMEM);
for (i = 0; i < BRW_LAST; i++)
- cfs_spin_lock_init(&stats->nid_brw_stats->hist[i].oh_lock);
+ spin_lock_init(&stats->nid_brw_stats->hist[i].oh_lock);
rc = lprocfs_seq_create(stats->nid_proc, "brw_stats", 0644,
&ofd_per_nid_stats_fops, stats);
{
int rc;
- cfs_spin_lock_init(&exp->exp_filter_data.fed_lock);
+ spin_lock_init(&exp->exp_filter_data.fed_lock);
CFS_INIT_LIST_HEAD(&exp->exp_filter_data.fed_mod_list);
- cfs_spin_lock(&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
exp->exp_connecting = 1;
- cfs_spin_unlock(&exp->exp_lock);
+ spin_unlock(&exp->exp_lock);
/* self-export doesn't need client data and ldlm initialization */
if (unlikely(obd_uuid_equals(&exp->exp_obd->obd_uuid,
sptlrpc_target_update_exp_flavor(obd, &tmp_rset);
- cfs_write_lock(&fo->fo_sptlrpc_lock);
+ write_lock(&fo->fo_sptlrpc_lock);
sptlrpc_rule_set_free(&fo->fo_sptlrpc_rset);
fo->fo_sptlrpc_rset = tmp_rset;
- cfs_write_unlock(&fo->fo_sptlrpc_lock);
+ write_unlock(&fo->fo_sptlrpc_lock);
return 0;
}
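The fo_sptlrpc_lock and obd_pool_lock changes use the plain rwlock renames; a minimal sketch of the assumed rules (the word-boundary anchors keep them from touching the *_irqsave variants):

# rwlock (sketch: assumed rule shapes)
s/\bcfs_rwlock_t\b/rwlock_t/g
s/\bcfs_rwlock_init\b/rwlock_init/g
s/\bcfs_read_lock\b/read_lock/g
s/\bcfs_read_unlock\b/read_unlock/g
s/\bcfs_write_lock\b/write_lock/g
s/\bcfs_write_unlock\b/write_unlock/g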
{
int rc;
- cfs_spin_lock(&ofd->ofd_osfs_lock);
+ spin_lock(&ofd->ofd_osfs_lock);
if (cfs_time_before_64(ofd->ofd_osfs_age, max_age) || max_age == 0) {
obd_size unstable;
/* record value of inflight counter before running statfs to
* compute the diff once statfs is completed */
unstable = ofd->ofd_osfs_inflight;
- cfs_spin_unlock(&ofd->ofd_osfs_lock);
+ spin_unlock(&ofd->ofd_osfs_lock);
/* statfs can sleep ... hopefully not for too long since we can
* call it fairly often as space fills up */
if (unlikely(rc))
return rc;
- cfs_spin_lock(&ofd->ofd_grant_lock);
- cfs_spin_lock(&ofd->ofd_osfs_lock);
+ spin_lock(&ofd->ofd_grant_lock);
+ spin_lock(&ofd->ofd_osfs_lock);
/* calculate how much space was written while we released the
* ofd_osfs_lock */
unstable = ofd->ofd_osfs_inflight - unstable;
/* similarly, there is some uncertainty on write requests
* between prepare & commit */
ofd->ofd_osfs_unstable += ofd->ofd_tot_pending;
- cfs_spin_unlock(&ofd->ofd_grant_lock);
+ spin_unlock(&ofd->ofd_grant_lock);
		/* finally update cached statfs data */
ofd->ofd_osfs = *osfs;
ofd->ofd_statfs_inflight--; /* stop tracking */
if (ofd->ofd_statfs_inflight == 0)
ofd->ofd_osfs_inflight = 0;
- cfs_spin_unlock(&ofd->ofd_osfs_lock);
+ spin_unlock(&ofd->ofd_osfs_lock);
if (from_cache)
*from_cache = 0;
} else {
/* use cached statfs data */
*osfs = ofd->ofd_osfs;
- cfs_spin_unlock(&ofd->ofd_osfs_lock);
+ spin_unlock(&ofd->ofd_osfs_lock);
if (from_cache)
*from_cache = 1;
}
GOTO(out_nolock, rc = 0);
}
/* This causes inflight precreates to abort and drop lock */
- cfs_set_bit(oa->o_seq, &ofd->ofd_destroys_in_progress);
- cfs_mutex_lock(&ofd->ofd_create_locks[oa->o_seq]);
- if (!cfs_test_bit(oa->o_seq, &ofd->ofd_destroys_in_progress)) {
+ set_bit(oa->o_seq, &ofd->ofd_destroys_in_progress);
+ mutex_lock(&ofd->ofd_create_locks[oa->o_seq]);
+ if (!test_bit(oa->o_seq, &ofd->ofd_destroys_in_progress)) {
CERROR("%s:["LPU64"] destroys_in_progress already cleared\n",
exp->exp_obd->obd_name, oa->o_seq);
GOTO(out, rc = 0);
rc = 0;
} else if (diff < 0) {
rc = ofd_orphans_destroy(env, exp, ofd, oa);
- cfs_clear_bit(oa->o_seq, &ofd->ofd_destroys_in_progress);
+ clear_bit(oa->o_seq, &ofd->ofd_destroys_in_progress);
} else {
/* XXX: Used by MDS for the first time! */
- cfs_clear_bit(oa->o_seq, &ofd->ofd_destroys_in_progress);
+ clear_bit(oa->o_seq, &ofd->ofd_destroys_in_progress);
}
} else {
- cfs_mutex_lock(&ofd->ofd_create_locks[oa->o_seq]);
+ mutex_lock(&ofd->ofd_create_locks[oa->o_seq]);
if (oti->oti_conn_cnt < exp->exp_conn_cnt) {
CERROR("%s: dropping old precreate request\n",
ofd_obd(ofd)->obd_name);
ofd_info2oti(info, oti);
out:
- cfs_mutex_unlock(&ofd->ofd_create_locks[oa->o_seq]);
+ mutex_unlock(&ofd->ofd_create_locks[oa->o_seq]);
out_nolock:
if (rc == 0 && ea != NULL) {
struct lov_stripe_md *lsm = *ea;
switch (ev) {
case OBD_NOTIFY_CONFIG:
LASSERT(obd->obd_no_conn);
- cfs_spin_lock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_dev_lock);
obd->obd_no_conn = 0;
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_unlock(&obd->obd_dev_lock);
break;
default:
CDEBUG(D_INFO, "%s: Unhandled notification %#x\n",
info->fti_pre_version != curr_version) {
CDEBUG(D_INODE, "Version mismatch "LPX64" != "LPX64"\n",
info->fti_pre_version, curr_version);
- cfs_spin_lock(&info->fti_exp->exp_lock);
+ spin_lock(&info->fti_exp->exp_lock);
info->fti_exp->exp_vbr_failed = 1;
- cfs_spin_unlock(&info->fti_exp->exp_lock);
+ spin_unlock(&info->fti_exp->exp_lock);
RETURN (-EOVERFLOW);
}
info->fti_pre_version = curr_version;
*/
if (info->fti_transno == 0 &&
*transno_p == ofd->ofd_lut.lut_last_transno) {
- cfs_spin_lock(&ofd->ofd_lut.lut_translock);
+ spin_lock(&ofd->ofd_lut.lut_translock);
ofd->ofd_lut.lut_lsd.lsd_last_transno =
ofd->ofd_lut.lut_last_transno;
- cfs_spin_unlock(&ofd->ofd_lut.lut_translock);
+ spin_unlock(&ofd->ofd_lut.lut_translock);
tgt_server_data_write(info->fti_env, &ofd->ofd_lut, th);
}
err = 0;
/* All operations performed by LW clients are synchronous and
* we store the committed transno in the last_rcvd header */
- cfs_spin_lock(&tg->lut_translock);
+ spin_lock(&tg->lut_translock);
if (info->fti_transno > tg->lut_lsd.lsd_last_transno) {
tg->lut_lsd.lsd_last_transno = info->fti_transno;
update = true;
}
- cfs_spin_unlock(&tg->lut_translock);
+ spin_unlock(&tg->lut_translock);
if (update)
err = tgt_server_data_write(info->fti_env, tg, th);
} else {
info->fti_has_trans = 1;
}
- cfs_spin_lock(&ofd->ofd_lut.lut_translock);
+ spin_lock(&ofd->ofd_lut.lut_translock);
if (txn->th_result != 0) {
if (info->fti_transno != 0) {
CERROR("Replay transno "LPU64" failed: rc %d\n",
if (info->fti_transno > ofd->ofd_lut.lut_last_transno)
ofd->ofd_lut.lut_last_transno = info->fti_transno;
}
- cfs_spin_unlock(&ofd->ofd_lut.lut_translock);
+ spin_unlock(&ofd->ofd_lut.lut_translock);
/** VBR: set new versions */
if (txn->th_result == 0 && info->fti_obj != NULL) {
rc = osc_make_ready(env, oap, OBD_BRW_WRITE);
switch (rc) {
case 0:
- cfs_spin_lock(&oap->oap_lock);
+ spin_lock(&oap->oap_lock);
oap->oap_async_flags |= ASYNC_READY;
- cfs_spin_unlock(&oap->oap_lock);
+ spin_unlock(&oap->oap_lock);
break;
case -EALREADY:
LASSERT((oap->oap_async_flags & ASYNC_READY) != 0);
/* Clear opg->ops_transfer_pinned before VM lock is released. */
opg->ops_transfer_pinned = 0;
- cfs_spin_lock(&obj->oo_seatbelt);
+ spin_lock(&obj->oo_seatbelt);
LASSERT(opg->ops_submitter != NULL);
LASSERT(!cfs_list_empty(&opg->ops_inflight));
cfs_list_del_init(&opg->ops_inflight);
opg->ops_submitter = NULL;
- cfs_spin_unlock(&obj->oo_seatbelt);
+ spin_unlock(&obj->oo_seatbelt);
opg->ops_submit_time = 0;
srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK;
}
/* As the transfer for this page is being done, clear the flags */
- cfs_spin_lock(&oap->oap_lock);
+ spin_lock(&oap->oap_lock);
oap->oap_async_flags = 0;
- cfs_spin_unlock(&oap->oap_lock);
+ spin_unlock(&oap->oap_lock);
oap->oap_interrupted = 0;
if (oap->oap_cmd & OBD_BRW_WRITE && xid > 0) {
CFS_INIT_LIST_HEAD(&oap->oap_pending_item);
CFS_INIT_LIST_HEAD(&oap->oap_rpc_item);
- cfs_spin_lock_init(&oap->oap_lock);
+ spin_lock_init(&oap->oap_lock);
CDEBUG(D_INFO, "oap %p page %p obj off "LPU64"\n",
oap, page, oap->oap_obj_off);
RETURN(0);
if (rc)
GOTO(out, rc);
- cfs_spin_lock(&oap->oap_lock);
+ spin_lock(&oap->oap_lock);
oap->oap_async_flags |= ASYNC_READY|ASYNC_URGENT;
- cfs_spin_unlock(&oap->oap_lock);
+ spin_unlock(&oap->oap_lock);
if (cfs_memory_pressure_get())
ext->oe_memalloc = 1;
struct osc_async_cbargs {
bool opc_rpc_sent;
int opc_rc;
- cfs_completion_t opc_sync;
+ struct completion opc_sync;
} oi_cbarg;
};
*/
struct cl_io oo_debug_io;
/** Serialization object for osc_object::oo_debug_io. */
- cfs_mutex_t oo_debug_mutex;
+ struct mutex oo_debug_mutex;
#endif
/**
* List of pages in transfer.
* Lock, protecting ccc_object::cob_inflight, because a seat-belt is
* locked during take-off and landing.
*/
- cfs_spinlock_t oo_seatbelt;
+ spinlock_t oo_seatbelt;
/**
* used by the osc to keep track of what objects to build into rpcs.
/** Protect extent tree. Will be used to protect
* oo_{read|write}_pages soon. */
- cfs_spinlock_t oo_lock;
+ spinlock_t oo_lock;
};
static inline void osc_object_lock(struct osc_object *obj)
{
- cfs_spin_lock(&obj->oo_lock);
+ spin_lock(&obj->oo_lock);
}
static inline int osc_object_trylock(struct osc_object *obj)
{
- return cfs_spin_trylock(&obj->oo_lock);
+ return spin_trylock(&obj->oo_lock);
}
static inline void osc_object_unlock(struct osc_object *obj)
{
- cfs_spin_unlock(&obj->oo_lock);
+ spin_unlock(&obj->oo_lock);
}
static inline int osc_object_is_locked(struct osc_object *obj)
{
- return cfs_spin_is_locked(&obj->oo_lock);
+ return spin_is_locked(&obj->oo_lock);
}
/*
}
};
-cfs_lock_class_key_t osc_ast_guard_class;
+struct lock_class_key osc_ast_guard_class;
/*****************************************************************************
*
struct client_obd *oap_cli;
struct osc_object *oap_obj;
- struct ldlm_lock *oap_ldlm_lock;
- cfs_spinlock_t oap_lock;
+ struct ldlm_lock *oap_ldlm_lock;
+ spinlock_t oap_lock;
};
#define oap_page oap_brw_page.pg
cfs_list_t *ext_list, int cmd, pdl_policy_t p);
int osc_lru_shrink(struct client_obd *cli, int target);
-extern cfs_spinlock_t osc_ast_guard;
+extern spinlock_t osc_ast_guard;
int osc_cleanup(struct obd_device *obd);
int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg);
struct osc_async_cbargs *args = a;
args->opc_rc = rc;
- cfs_complete(&args->opc_sync);
+ complete(&args->opc_sync);
return 0;
}
oinfo.oi_oa = oa;
oinfo.oi_capa = io->u.ci_setattr.sa_capa;
- cfs_init_completion(&cbargs->opc_sync);
+ init_completion(&cbargs->opc_sync);
if (ia_valid & ATTR_SIZE)
result = osc_punch_base(osc_export(cl2osc(obj)),
int result = 0;
if (cbargs->opc_rpc_sent) {
- cfs_wait_for_completion(&cbargs->opc_sync);
+ wait_for_completion(&cbargs->opc_sync);
result = io->ci_result = cbargs->opc_rc;
}
if (result == 0) {
memset(oinfo, 0, sizeof(*oinfo));
oinfo->oi_oa = oa;
oinfo->oi_capa = fio->fi_capa;
- cfs_init_completion(&cbargs->opc_sync);
+ init_completion(&cbargs->opc_sync);
rc = osc_sync_base(osc_export(obj), oinfo, osc_async_upcall, cbargs,
PTLRPCD_SET);
struct osc_io *oio = cl2osc_io(env, slice);
struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
- cfs_wait_for_completion(&cbargs->opc_sync);
+ wait_for_completion(&cbargs->opc_sync);
if (result == 0)
result = cbargs->opc_rc;
}
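opc_sync above moves from the libcfs completion wrappers to the kernel completion API; the rules producing these hunks presumably look like this (a sketch, not the verbatim script):

# completion (sketch: assumed rule shapes)
s/\bcfs_completion_t\b/struct completion/g
s/\bcfs_init_completion\b/init_completion/g
s/\bcfs_complete\b/complete/g
s/\bcfs_wait_for_completion\b/wait_for_completion/g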
*/
static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
{
- struct ldlm_lock *dlmlock;
+ struct ldlm_lock *dlmlock;
- cfs_spin_lock(&osc_ast_guard);
- dlmlock = olck->ols_lock;
- if (dlmlock == NULL) {
- cfs_spin_unlock(&osc_ast_guard);
+ spin_lock(&osc_ast_guard);
+ dlmlock = olck->ols_lock;
+ if (dlmlock == NULL) {
+ spin_unlock(&osc_ast_guard);
return;
}
* call to osc_lock_detach() */
dlmlock->l_ast_data = NULL;
olck->ols_handle.cookie = 0ULL;
- cfs_spin_unlock(&osc_ast_guard);
+ spin_unlock(&osc_ast_guard);
lock_res_and_lock(dlmlock);
if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
* Global spin-lock protecting consistency of ldlm_lock::l_ast_data
* pointers. Initialized in osc_init().
*/
-cfs_spinlock_t osc_ast_guard;
+spinlock_t osc_ast_guard;
static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
{
- struct osc_lock *olck;
+ struct osc_lock *olck;
- lock_res_and_lock(dlm_lock);
- cfs_spin_lock(&osc_ast_guard);
+ lock_res_and_lock(dlm_lock);
+ spin_lock(&osc_ast_guard);
olck = dlm_lock->l_ast_data;
if (olck != NULL) {
struct cl_lock *lock = olck->ols_cl.cls_lock;
} else
olck = NULL;
}
- cfs_spin_unlock(&osc_ast_guard);
- unlock_res_and_lock(dlm_lock);
- return olck;
+ spin_unlock(&osc_ast_guard);
+ unlock_res_and_lock(dlm_lock);
+ return olck;
}
static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
LASSERT(dlmlock != NULL);
lock_res_and_lock(dlmlock);
- cfs_spin_lock(&osc_ast_guard);
- LASSERT(dlmlock->l_ast_data == olck);
- LASSERT(olck->ols_lock == NULL);
- olck->ols_lock = dlmlock;
- cfs_spin_unlock(&osc_ast_guard);
+ spin_lock(&osc_ast_guard);
+ LASSERT(dlmlock->l_ast_data == olck);
+ LASSERT(olck->ols_lock == NULL);
+ olck->ols_lock = dlmlock;
+ spin_unlock(&osc_ast_guard);
/*
* Lock might be not yet granted. In this case, completion ast
dlmlock = ldlm_handle2lock(&olck->ols_handle);
if (dlmlock != NULL) {
lock_res_and_lock(dlmlock);
- cfs_spin_lock(&osc_ast_guard);
- LASSERT(olck->ols_lock == NULL);
- dlmlock->l_ast_data = NULL;
- olck->ols_handle.cookie = 0ULL;
- cfs_spin_unlock(&osc_ast_guard);
+ spin_lock(&osc_ast_guard);
+ LASSERT(olck->ols_lock == NULL);
+ dlmlock->l_ast_data = NULL;
+ olck->ols_handle.cookie = 0ULL;
+ spin_unlock(&osc_ast_guard);
ldlm_lock_fail_match_locked(dlmlock);
unlock_res_and_lock(dlmlock);
LDLM_LOCK_PUT(dlmlock);
if (olck->ols_glimpse)
return 0;
- cfs_spin_lock(&hdr->coh_lock_guard);
+ spin_lock(&hdr->coh_lock_guard);
cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
struct cl_lock_descr *cld = &scan->cll_descr;
const struct osc_lock *scan_ols;
conflict = scan;
break;
}
- cfs_spin_unlock(&hdr->coh_lock_guard);
+ spin_unlock(&hdr->coh_lock_guard);
if (conflict) {
if (lock->cll_descr.cld_mode == CLM_GROUP) {
lock = olck->ols_cl.cls_lock;
descr = &lock->cll_descr;
- cfs_mutex_lock(&oob->oo_debug_mutex);
+ mutex_lock(&oob->oo_debug_mutex);
io->ci_obj = cl_object_top(obj);
io->ci_ignore_layout = 1;
cfs_cond_resched();
} while (result != CLP_GANG_OKAY);
cl_io_fini(env, io);
- cfs_mutex_unlock(&oob->oo_debug_mutex);
+ mutex_unlock(&oob->oo_debug_mutex);
cl_env_nested_put(&nest, env);
return (result == CLP_GANG_ABORT);
int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
{
- struct osc_lock *olock;
- int rc = 0;
+ struct osc_lock *olock;
+ int rc = 0;
- cfs_spin_lock(&osc_ast_guard);
+ spin_lock(&osc_ast_guard);
olock = dlm->l_ast_data;
/*
* there's a very rare race with osc_page_addref_lock(), but that
cfs_atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
rc = 1;
}
- cfs_spin_unlock(&osc_ast_guard);
- return rc;
+ spin_unlock(&osc_ast_guard);
+ return rc;
}
/** @} osc */
osc->oo_oinfo = cconf->u.coc_oinfo;
#ifdef INVARIANT_CHECK
- cfs_mutex_init(&osc->oo_debug_mutex);
+ mutex_init(&osc->oo_debug_mutex);
#endif
- cfs_spin_lock_init(&osc->oo_seatbelt);
+ spin_lock_init(&osc->oo_seatbelt);
for (i = 0; i < CRT_NR; ++i)
CFS_INIT_LIST_HEAD(&osc->oo_inflight[i]);
CFS_INIT_LIST_HEAD(&osc->oo_reading_exts);
cfs_atomic_set(&osc->oo_nr_reads, 0);
cfs_atomic_set(&osc->oo_nr_writes, 0);
- cfs_spin_lock_init(&osc->oo_lock);
+ spin_lock_init(&osc->oo_lock);
return 0;
}
descr->cld_mode = mode;
descr->cld_start = page->cp_index;
descr->cld_end = page->cp_index;
- cfs_spin_lock(&hdr->coh_lock_guard);
+ spin_lock(&hdr->coh_lock_guard);
cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
/*
* Lock-less sub-lock has to be either in HELD state
break;
}
}
- cfs_spin_unlock(&hdr->coh_lock_guard);
+ spin_unlock(&hdr->coh_lock_guard);
}
return result;
}
* first and then use it as inflight. */
osc_lru_del(osc_cli(obj), opg, false);
- cfs_spin_lock(&obj->oo_seatbelt);
- cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
- opg->ops_submitter = cfs_current();
- cfs_spin_unlock(&obj->oo_seatbelt);
+ spin_lock(&obj->oo_seatbelt);
+ cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
+ opg->ops_submitter = cfs_current();
+ spin_unlock(&obj->oo_seatbelt);
}
static int osc_page_cache_add(const struct lu_env *env,
LASSERT(0);
}
- cfs_spin_lock(&obj->oo_seatbelt);
+ spin_lock(&obj->oo_seatbelt);
if (opg->ops_submitter != NULL) {
LASSERT(!cfs_list_empty(&opg->ops_inflight));
cfs_list_del_init(&opg->ops_inflight);
opg->ops_submitter = NULL;
}
- cfs_spin_unlock(&obj->oo_seatbelt);
+ spin_unlock(&obj->oo_seatbelt);
osc_lru_del(osc_cli(obj), opg, true);
EXIT;
opg->ops_from = from;
opg->ops_to = to;
- cfs_spin_lock(&oap->oap_lock);
- oap->oap_async_flags |= ASYNC_COUNT_STABLE;
- cfs_spin_unlock(&oap->oap_lock);
+ spin_lock(&oap->oap_lock);
+ oap->oap_async_flags |= ASYNC_COUNT_STABLE;
+ spin_unlock(&oap->oap_lock);
}
static int osc_page_cancel(const struct lu_env *env,
/* Reclaim LRU slots from other client_obd as it can't free enough
* from its own. This should rarely happen. */
- cfs_spin_lock(&cache->ccc_lru_lock);
+ spin_lock(&cache->ccc_lru_lock);
cache->ccc_lru_shrinkers++;
cfs_list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
cfs_list_for_each_entry_safe(victim, tmp, &cache->ccc_lru, cl_lru_osc) {
if (cfs_atomic_read(&victim->cl_lru_in_list) > 0)
break;
}
- cfs_spin_unlock(&cache->ccc_lru_lock);
+ spin_unlock(&cache->ccc_lru_lock);
if (victim == cli) {
CDEBUG(D_CACHE, "%s: can't get any free LRU slots.\n",
cli->cl_import->imp_obd->obd_name);
LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
lock_res_and_lock(lock);
- cfs_spin_lock(&osc_ast_guard);
+ spin_lock(&osc_ast_guard);
- if (lock->l_ast_data == NULL)
- lock->l_ast_data = data;
- if (lock->l_ast_data == data)
- set = 1;
+ if (lock->l_ast_data == NULL)
+ lock->l_ast_data = data;
+ if (lock->l_ast_data == data)
+ set = 1;
- cfs_spin_unlock(&osc_ast_guard);
- unlock_res_and_lock(lock);
+ spin_unlock(&osc_ast_guard);
+ unlock_res_and_lock(lock);
- return set;
+ return set;
}
static int osc_set_data_with_check(struct lustre_handle *lockh,
/* Since the request might also come from lprocfs, we need to
* sync this with client_disconnect_export Bug15684 */
- cfs_down_read(&obd->u.cli.cl_sem);
+ down_read(&obd->u.cli.cl_sem);
if (obd->u.cli.cl_import)
imp = class_import_get(obd->u.cli.cl_import);
- cfs_up_read(&obd->u.cli.cl_sem);
+ up_read(&obd->u.cli.cl_sem);
if (!imp)
RETURN(-ENODEV);
/* add this osc into entity list */
LASSERT(cfs_list_empty(&cli->cl_lru_osc));
- cfs_spin_lock(&cli->cl_cache->ccc_lru_lock);
+ spin_lock(&cli->cl_cache->ccc_lru_lock);
cfs_list_add(&cli->cl_lru_osc, &cli->cl_cache->ccc_lru);
- cfs_spin_unlock(&cli->cl_cache->ccc_lru_lock);
+ spin_unlock(&cli->cl_cache->ccc_lru_lock);
RETURN(0);
}
CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
/* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
ptlrpc_deactivate_import(imp);
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_pingable = 0;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ imp->imp_pingable = 0;
+ spin_unlock(&imp->imp_lock);
break;
}
case OBD_CLEANUP_EXPORTS: {
/* lru cleanup */
if (cli->cl_cache != NULL) {
LASSERT(cfs_atomic_read(&cli->cl_cache->ccc_users) > 0);
- cfs_spin_lock(&cli->cl_cache->ccc_lru_lock);
+ spin_lock(&cli->cl_cache->ccc_lru_lock);
cfs_list_del_init(&cli->cl_lru_osc);
- cfs_spin_unlock(&cli->cl_cache->ccc_lru_lock);
+ spin_unlock(&cli->cl_cache->ccc_lru_lock);
cli->cl_lru_left = NULL;
cfs_atomic_dec(&cli->cl_cache->ccc_users);
cli->cl_cache = NULL;
};
extern struct lu_kmem_descr osc_caches[];
-extern cfs_spinlock_t osc_ast_guard;
-extern cfs_lock_class_key_t osc_ast_guard_class;
+extern spinlock_t osc_ast_guard;
+extern struct lock_class_key osc_ast_guard_class;
int __init osc_init(void)
{
RETURN(rc);
}
- cfs_spin_lock_init(&osc_ast_guard);
- cfs_lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
+ spin_lock_init(&osc_ast_guard);
+ lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
- RETURN(rc);
+ RETURN(rc);
}
#ifdef __KERNEL__
struct osd_compat_objid_seq {
/* protects on-fly initialization */
- cfs_semaphore_t dir_init_sem;
+ struct semaphore dir_init_sem;
/* file storing last created objid */
struct osd_inode_id last_id;
struct dentry *groot; /* O/<seq> */
if (grp->groot != NULL)
RETURN(0);
- cfs_down(&grp->dir_init_sem);
+ down(&grp->dir_init_sem);
sprintf(name, "%d", seq);
d = simple_mkdir(map->root, osd->od_mnt, name, 0755, 1);
if (rc)
osd_compat_seq_fini(osd, seq);
out:
- cfs_up(&grp->dir_init_sem);
+ up(&grp->dir_init_sem);
RETURN(rc);
}
/* Initialize all groups */
for (i = 0; i < MAX_OBJID_GROUP; i++) {
- cfs_sema_init(&dev->od_ost_map->groups[i].dir_init_sem, 1);
+ sema_init(&dev->od_ost_map->groups[i].dir_init_sem, 1);
rc = osd_compat_seq_init(dev, i);
if (rc) {
osd_compat_fini(dev);
dt_object_init(&mo->oo_dt, NULL, d);
mo->oo_dt.do_ops = &osd_obj_ea_ops;
l->lo_ops = &osd_lu_obj_ops;
- cfs_init_rwsem(&mo->oo_sem);
- cfs_init_rwsem(&mo->oo_ext_idx_sem);
- cfs_spin_lock_init(&mo->oo_guard);
+ init_rwsem(&mo->oo_sem);
+ init_rwsem(&mo->oo_ext_idx_sem);
+ spin_lock_init(&mo->oo_guard);
return l;
} else {
return NULL;
ksfs = &osd_oti_get(env)->oti_ksfs;
}
- cfs_spin_lock(&osd->od_osfs_lock);
+ spin_lock(&osd->od_osfs_lock);
/* cache 1 second */
if (cfs_time_before_64(osd->od_osfs_age, cfs_time_shift_64(-1))) {
result = sb->s_op->statfs(sb->s_root, ksfs);
}
}
- if (likely(result == 0))
- *sfs = osd->od_statfs;
- cfs_spin_unlock(&osd->od_osfs_lock);
+ if (likely(result == 0))
+ *sfs = osd->od_statfs;
+ spin_unlock(&osd->od_osfs_lock);
if (unlikely(env == NULL))
OBD_FREE_PTR(ksfs);
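The hunk above keeps the per-device statfs result cached for one second under od_osfs_lock, so concurrent callers reuse the cached copy instead of calling ->statfs() again. A minimal userspace sketch of the same time-bounded cache, assuming a pthread mutex in place of the spinlock and a hypothetical query() callback in place of sb->s_op->statfs():

#include <pthread.h>
#include <time.h>

struct statfs_cache {
	pthread_mutex_t lock;	/* plays the role of od_osfs_lock */
	time_t		age;	/* when the cached value was last refreshed */
	long		cached;	/* stands in for the cached struct obd_statfs */
};

/* Refresh at most once per second; everyone else reuses the cached copy. */
static long cached_statfs(struct statfs_cache *c, long (*query)(void))
{
	long val;

	pthread_mutex_lock(&c->lock);
	if (time(NULL) - c->age >= 1) {		/* cached value is stale */
		c->cached = query();		/* the expensive backend call */
		c->age = time(NULL);
	}
	val = c->cached;
	pthread_mutex_unlock(&c->lock);
	return val;
}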
LINVRNT(osd_invariant(obj));
LASSERT(obj->oo_owner != env);
- cfs_down_read_nested(&obj->oo_sem, role);
+ down_read_nested(&obj->oo_sem, role);
LASSERT(obj->oo_owner == NULL);
oti->oti_r_locks++;
LINVRNT(osd_invariant(obj));
LASSERT(obj->oo_owner != env);
- cfs_down_write_nested(&obj->oo_sem, role);
+ down_write_nested(&obj->oo_sem, role);
LASSERT(obj->oo_owner == NULL);
obj->oo_owner = env;
LASSERT(oti->oti_r_locks > 0);
oti->oti_r_locks--;
- cfs_up_read(&obj->oo_sem);
+ up_read(&obj->oo_sem);
}
static void osd_object_write_unlock(const struct lu_env *env,
LASSERT(oti->oti_w_locks > 0);
oti->oti_w_locks--;
obj->oo_owner = NULL;
- cfs_up_write(&obj->oo_sem);
+ up_write(&obj->oo_sem);
}
static int osd_object_write_locked(const struct lu_env *env,
RETURN(-ESTALE);
}
- cfs_spin_lock(&capa_lock);
- for (i = 0; i < 2; i++) {
- if (keys[i].lk_keyid == capa->lc_keyid) {
- oti->oti_capa_key = keys[i];
- break;
- }
- }
- cfs_spin_unlock(&capa_lock);
+ spin_lock(&capa_lock);
+ for (i = 0; i < 2; i++) {
+ if (keys[i].lk_keyid == capa->lc_keyid) {
+ oti->oti_capa_key = keys[i];
+ break;
+ }
+ }
+ spin_unlock(&capa_lock);
if (i == 2) {
DEBUG_CAPA(D_ERROR, capa, "no matched capa key");
if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
return -EACCES;
- cfs_spin_lock(&obj->oo_guard);
- osd_inode_getattr(env, obj->oo_inode, attr);
- cfs_spin_unlock(&obj->oo_guard);
- return 0;
+ spin_lock(&obj->oo_guard);
+ osd_inode_getattr(env, obj->oo_inode, attr);
+ spin_unlock(&obj->oo_guard);
+ return 0;
}
static int osd_declare_attr_set(const struct lu_env *env,
if (rc)
return rc;
- cfs_spin_lock(&obj->oo_guard);
- rc = osd_inode_setattr(env, inode, attr);
- cfs_spin_unlock(&obj->oo_guard);
+ spin_lock(&obj->oo_guard);
+ rc = osd_inode_setattr(env, inode, attr);
+ spin_unlock(&obj->oo_guard);
if (!rc)
inode->i_sb->s_op->dirty_inode(inode);
/* Parallel control for OI scrub. In most cases there is no lock
* contention, so it will not affect unlink performance. */
- cfs_mutex_lock(&inode->i_mutex);
+ mutex_lock(&inode->i_mutex);
if (S_ISDIR(inode->i_mode)) {
LASSERT(osd_inode_unlinked(inode) ||
inode->i_nlink == 1);
- cfs_spin_lock(&obj->oo_guard);
+ spin_lock(&obj->oo_guard);
clear_nlink(inode);
- cfs_spin_unlock(&obj->oo_guard);
+ spin_unlock(&obj->oo_guard);
inode->i_sb->s_op->dirty_inode(inode);
} else {
LASSERT(osd_inode_unlinked(inode));
OSD_EXEC_OP(th, destroy);
result = osd_oi_delete(osd_oti_get(env), osd, fid, th);
- cfs_mutex_unlock(&inode->i_mutex);
+ mutex_unlock(&inode->i_mutex);
/* XXX: add to ext3 orphan list */
/* rc = ext3_orphan_add(handle_t *handle, struct inode *inode) */
* at some point. Both e2fsprogs and any Lustre-supported ldiskfs
* do not actually care whether this flag is set or not.
*/
- cfs_spin_lock(&obj->oo_guard);
+ spin_lock(&obj->oo_guard);
/* inc_nlink from 0 may cause WARN_ON */
if(inode->i_nlink == 0)
set_nlink(inode, 1);
set_nlink(inode, 1);
}
LASSERT(inode->i_nlink <= LDISKFS_LINK_MAX);
- cfs_spin_unlock(&obj->oo_guard);
+ spin_unlock(&obj->oo_guard);
inode->i_sb->s_op->dirty_inode(inode);
LINVRNT(osd_invariant(obj));
OSD_EXEC_OP(th, ref_del);
- cfs_spin_lock(&obj->oo_guard);
+ spin_lock(&obj->oo_guard);
LASSERT(inode->i_nlink > 0);
drop_nlink(inode);
/* If this is/was a many-subdir directory (nlink > LDISKFS_LINK_MAX)
* inode will be deleted incorrectly. */
if (S_ISDIR(inode->i_mode) && inode->i_nlink == 0)
set_nlink(inode, 1);
- cfs_spin_unlock(&obj->oo_guard);
+ spin_unlock(&obj->oo_guard);
inode->i_sb->s_op->dirty_inode(inode);
LINVRNT(osd_invariant(obj));
RETURN(oc);
}
- cfs_spin_lock(&capa_lock);
- *key = dev->od_capa_keys[1];
- cfs_spin_unlock(&capa_lock);
+ spin_lock(&capa_lock);
+ *key = dev->od_capa_keys[1];
+ spin_unlock(&capa_lock);
capa->lc_keyid = key->lk_keyid;
capa->lc_expiry = cfs_time_current_sec() + dev->od_capa_timeout;
OBD_ALLOC_PTR(dir);
if (dir != NULL) {
- cfs_spin_lock(&obj->oo_guard);
- if (obj->oo_dir == NULL)
- obj->oo_dir = dir;
- else
- /*
- * Concurrent thread allocated container data.
- */
- OBD_FREE_PTR(dir);
- cfs_spin_unlock(&obj->oo_guard);
- /*
- * Now, that we have container data, serialize its
- * initialization.
- */
- cfs_down_write(&obj->oo_ext_idx_sem);
- /*
- * recheck under lock.
- */
- if (!osd_has_index(obj))
- result = osd_iam_container_init(env, obj, dir);
- else
- result = 0;
- cfs_up_write(&obj->oo_ext_idx_sem);
+ spin_lock(&obj->oo_guard);
+ if (obj->oo_dir == NULL)
+ obj->oo_dir = dir;
+ else
+ /*
+ * Concurrent thread allocated container data.
+ */
+ OBD_FREE_PTR(dir);
+ spin_unlock(&obj->oo_guard);
+ /*
+ * Now, that we have container data, serialize its
+ * initialization.
+ */
+ down_write(&obj->oo_ext_idx_sem);
+ /*
+ * recheck under lock.
+ */
+ if (!osd_has_index(obj))
+ result = osd_iam_container_init(env, obj, dir);
+ else
+ result = 0;
+ up_write(&obj->oo_ext_idx_sem);
} else {
result = -ENOMEM;
}
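The hunk above is a double-checked initialization: the freshly allocated container pointer is installed under the cheap oo_guard spinlock (the losing thread frees its copy), and the expensive osd_iam_container_init() is then serialized by oo_ext_idx_sem with a re-check under that lock. A minimal userspace sketch of the same pattern, assuming pthread locks and a hypothetical struct obj/obj_dir_init():

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct dir_data { int initialized; };

struct obj {
	pthread_mutex_t  guard;		/* plays the role of oo_guard */
	pthread_rwlock_t ext_idx;	/* plays the role of oo_ext_idx_sem */
	struct dir_data *dir;		/* plays the role of oo_dir */
};

static int obj_dir_init(struct obj *o)
{
	struct dir_data *dir = calloc(1, sizeof(*dir));

	if (dir == NULL)
		return -ENOMEM;

	/* Install the pointer under the cheap lock; the loser frees its copy. */
	pthread_mutex_lock(&o->guard);
	if (o->dir == NULL)
		o->dir = dir;
	else
		free(dir);
	pthread_mutex_unlock(&o->guard);

	/* Serialize the expensive part and re-check under the write lock. */
	pthread_rwlock_wrlock(&o->ext_idx);
	if (!o->dir->initialized)
		o->dir->initialized = 1;	/* stands in for osd_iam_container_init() */
	pthread_rwlock_unlock(&o->ext_idx);
	return 0;
}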
ldiskfs_htree_lock(hlock, obj->oo_hl_head,
dir, LDISKFS_HLOCK_DEL);
} else {
- cfs_down_write(&obj->oo_ext_idx_sem);
+ down_write(&obj->oo_ext_idx_sem);
}
bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
if (hlock != NULL)
ldiskfs_htree_unlock(hlock);
else
- cfs_up_write(&obj->oo_ext_idx_sem);
+ up_write(&obj->oo_ext_idx_sem);
LASSERT(osd_invariant(obj));
RETURN(rc);
ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
pobj->oo_inode, 0);
} else {
- cfs_down_write(&pobj->oo_ext_idx_sem);
+ down_write(&pobj->oo_ext_idx_sem);
}
rc = osd_add_dot_dotdot(info, pobj, cinode, name,
(struct dt_rec *)lu_object_fid(&pobj->oo_dt.do_lu),
ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
pobj->oo_inode, LDISKFS_HLOCK_ADD);
} else {
- cfs_down_write(&pobj->oo_ext_idx_sem);
+ down_write(&pobj->oo_ext_idx_sem);
}
rc = __osd_ea_add_rec(info, pobj, cinode, name, fid,
if (hlock != NULL)
ldiskfs_htree_unlock(hlock);
else
- cfs_up_write(&pobj->oo_ext_idx_sem);
+ up_write(&pobj->oo_ext_idx_sem);
return rc;
}
ldiskfs_htree_lock(hlock, obj->oo_hl_head,
dir, LDISKFS_HLOCK_LOOKUP);
} else {
- cfs_down_read(&obj->oo_ext_idx_sem);
+ down_read(&obj->oo_ext_idx_sem);
}
bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
if (hlock != NULL)
ldiskfs_htree_unlock(hlock);
else
- cfs_up_read(&obj->oo_ext_idx_sem);
+ up_read(&obj->oo_ext_idx_sem);
return rc;
}
ldiskfs_htree_lock(hlock, obj->oo_hl_head,
inode, LDISKFS_HLOCK_READDIR);
} else {
- cfs_down_read(&obj->oo_ext_idx_sem);
+ down_read(&obj->oo_ext_idx_sem);
}
result = inode->i_fop->readdir(&it->oie_file, it,
if (hlock != NULL)
ldiskfs_htree_unlock(hlock);
else
- cfs_up_read(&obj->oo_ext_idx_sem);
+ up_read(&obj->oo_ext_idx_sem);
if (it->oie_rd_dirent == 0) {
result = -EIO;
l->ld_ops = &osd_lu_ops;
o->od_dt_dev.dd_ops = &osd_dt_ops;
- cfs_spin_lock_init(&o->od_osfs_lock);
- cfs_mutex_init(&o->od_otable_mutex);
+ spin_lock_init(&o->od_osfs_lock);
+ mutex_init(&o->od_otable_mutex);
o->od_osfs_age = cfs_time_shift_64(-1000);
o->od_capa_hash = init_capa_hash();
*exp = class_conn2export(&conn);
- cfs_spin_lock(&osd->od_osfs_lock);
+ spin_lock(&osd->od_osfs_lock);
osd->od_connects++;
- cfs_spin_unlock(&osd->od_osfs_lock);
+ spin_unlock(&osd->od_osfs_lock);
RETURN(0);
}
ENTRY;
/* Only disconnect the underlying layers on the final disconnect. */
- cfs_spin_lock(&osd->od_osfs_lock);
+ spin_lock(&osd->od_osfs_lock);
osd->od_connects--;
if (osd->od_connects == 0)
release = 1;
- cfs_spin_unlock(&osd->od_osfs_lock);
+ spin_unlock(&osd->od_osfs_lock);
rc = class_disconnect(exp); /* bz 9811 */
idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
c->ic_descr->id_root_gap +
sizeof(struct dx_countlimit));
- cfs_down(&c->ic_idle_sem);
+ down(&c->ic_idle_sem);
bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
if (bh != NULL && IS_ERR(bh))
result = PTR_ERR(bh);
else
c->ic_idle_bh = bh;
- cfs_up(&c->ic_idle_sem);
+ up(&c->ic_idle_sem);
}
return result;
memset(c, 0, sizeof *c);
c->ic_descr = descr;
c->ic_object = inode;
- cfs_init_rwsem(&c->ic_sem);
+ init_rwsem(&c->ic_sem);
dynlock_init(&c->ic_tree_lock);
- cfs_sema_init(&c->ic_idle_sem, 1);
+ sema_init(&c->ic_idle_sem, 1);
return 0;
}
EXPORT_SYMBOL(iam_container_init);
void iam_container_write_lock(struct iam_container *ic)
{
- cfs_down_write(&ic->ic_sem);
+ down_write(&ic->ic_sem);
}
void iam_container_write_unlock(struct iam_container *ic)
{
- cfs_up_write(&ic->ic_sem);
+ up_write(&ic->ic_sem);
}
void iam_container_read_lock(struct iam_container *ic)
{
- cfs_down_read(&ic->ic_sem);
+ down_read(&ic->ic_sem);
}
void iam_container_read_unlock(struct iam_container *ic)
{
- cfs_up_read(&ic->ic_sem);
+ up_read(&ic->ic_sem);
}
/*
if (c->ic_idle_bh == NULL)
goto newblock;
- cfs_down(&c->ic_idle_sem);
+ down(&c->ic_idle_sem);
if (unlikely(c->ic_idle_bh == NULL)) {
- cfs_up(&c->ic_idle_sem);
+ up(&c->ic_idle_sem);
goto newblock;
}
if (*e != 0)
goto fail;
- cfs_up(&c->ic_idle_sem);
+ up(&c->ic_idle_sem);
bh = ldiskfs_bread(NULL, inode, *b, 0, e);
if (bh == NULL)
return NULL;
}
c->ic_idle_bh = idle;
- cfs_up(&c->ic_idle_sem);
+ up(&c->ic_idle_sem);
got:
/* get write access for the found buffer head */
return bh;
fail:
- cfs_up(&c->ic_idle_sem);
+ up(&c->ic_idle_sem);
ldiskfs_std_error(inode->i_sb, *e);
return NULL;
}
int count;
int rc;
- cfs_down(&c->ic_idle_sem);
+ down(&c->ic_idle_sem);
if (unlikely(c->ic_idle_failed)) {
rc = -EFAULT;
goto unlock;
rc = iam_txn_dirty(h, p, c->ic_idle_bh);
unlock:
- cfs_up(&c->ic_idle_sem);
+ up(&c->ic_idle_sem);
if (rc != 0)
CWARN("%.16s: idle blocks failed, will lose the blk %u\n",
LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk);
/*
* read-write lock protecting index consistency.
*/
- cfs_rw_semaphore_t ic_sem;
+ struct rw_semaphore ic_sem;
struct dynlock ic_tree_lock;
/*
* Protect ic_idle_bh
*/
- cfs_semaphore_t ic_idle_sem;
+ struct semaphore ic_idle_sem;
/*
* BH for idle blocks
*/
{
DX_DEVAL(iam_lock_stats.dls_bh_lock++);
#ifdef CONFIG_SMP
- while (cfs_test_and_set_bit(BH_DXLock, &bh->b_state)) {
- DX_DEVAL(iam_lock_stats.dls_bh_busy++);
- while (cfs_test_bit(BH_DXLock, &bh->b_state))
+ while (test_and_set_bit(BH_DXLock, &bh->b_state)) {
+ DX_DEVAL(iam_lock_stats.dls_bh_busy++);
+ while (test_bit(BH_DXLock, &bh->b_state))
cpu_relax();
}
#endif
* to protect index ops.
*/
struct htree_lock_head *oo_hl_head;
- cfs_rw_semaphore_t oo_ext_idx_sem;
- cfs_rw_semaphore_t oo_sem;
- struct osd_directory *oo_dir;
- /** protects inode attributes. */
- cfs_spinlock_t oo_guard;
+ struct rw_semaphore oo_ext_idx_sem;
+ struct rw_semaphore oo_sem;
+ struct osd_directory *oo_dir;
+ /** protects inode attributes. */
+ spinlock_t oo_guard;
/**
* Following two members are used to indicate the presence of dot and
* dotdot in the given directory. This is required for interop mode
*/
cfs_time_t od_osfs_age;
struct obd_statfs od_statfs;
- cfs_spinlock_t od_osfs_lock;
+ spinlock_t od_osfs_lock;
unsigned int od_noscrub:1;
- struct fsfilt_operations *od_fsops;
+ struct fsfilt_operations *od_fsops;
int od_connects;
struct lu_site od_site;
cfs_atomic_t od_r_in_flight;
cfs_atomic_t od_w_in_flight;
- cfs_mutex_t od_otable_mutex;
+ struct mutex od_otable_mutex;
struct osd_otable_it *od_otable_it;
struct osd_scrub od_scrub;
}
/* the check is outside of the loop for performance reasons -bzzz */
- if (!cfs_test_bit(BIO_RW, &bio->bi_rw)) {
+ if (!test_bit(BIO_RW, &bio->bi_rw)) {
bio_for_each_segment(bvl, bio, i) {
if (likely(error == 0))
SetPageUptodate(bvl->bv_page);
int err;
/* prevent reading after eof */
- cfs_spin_lock(&inode->i_lock);
- if (i_size_read(inode) < *offs + size) {
+ spin_lock(&inode->i_lock);
+ if (i_size_read(inode) < *offs + size) {
loff_t diff = i_size_read(inode) - *offs;
- cfs_spin_unlock(&inode->i_lock);
+ spin_unlock(&inode->i_lock);
if (diff < 0) {
CDEBUG(D_EXT2, "size %llu is too short to read @%llu\n",
i_size_read(inode), *offs);
} else {
size = diff;
}
- } else {
- cfs_spin_unlock(&inode->i_lock);
- }
+ } else {
+ spin_unlock(&inode->i_lock);
+ }
blocksize = 1 << inode->i_blkbits;
osize = size;
--new_size;
/* correct in-core and on-disk sizes */
if (new_size > i_size_read(inode)) {
- cfs_spin_lock(&inode->i_lock);
- if (new_size > i_size_read(inode))
- i_size_write(inode, new_size);
- if (i_size_read(inode) > LDISKFS_I(inode)->i_disksize) {
- LDISKFS_I(inode)->i_disksize = i_size_read(inode);
- dirty_inode = 1;
- }
- cfs_spin_unlock(&inode->i_lock);
+ spin_lock(&inode->i_lock);
+ if (new_size > i_size_read(inode))
+ i_size_write(inode, new_size);
+ if (i_size_read(inode) > LDISKFS_I(inode)->i_disksize) {
+ LDISKFS_I(inode)->i_disksize = i_size_read(inode);
+ dirty_inode = 1;
+ }
+ spin_unlock(&inode->i_lock);
if (dirty_inode)
inode->i_sb->s_op->dirty_inode(inode);
}
ENTRY;
for (i = 0; i < BRW_LAST; i++)
- cfs_spin_lock_init(&osd->od_brw_stats.hist[i].oh_lock);
+ spin_lock_init(&osd->od_brw_stats.hist[i].oh_lock);
osd->od_stats = lprocfs_alloc_stats(LPROC_OSD_LAST, 0);
if (osd->od_stats != NULL) {
"it's only valid for new filesystem.");
/** to serialize concurrent OI index initialization */
-static cfs_mutex_t oi_init_lock;
+static struct mutex oi_init_lock;
static struct dt_index_features oi_feat = {
.dif_flags = DT_IND_UPDATE,
if (oi == NULL)
RETURN(-ENOMEM);
- cfs_mutex_lock(&oi_init_lock);
+ mutex_lock(&oi_init_lock);
/* try to open existing multiple OIs first */
rc = osd_oi_table_open(info, osd, oi, sf->sf_oi_count, false);
if (rc < 0)
rc = 0;
}
- cfs_mutex_unlock(&oi_init_lock);
+ mutex_unlock(&oi_init_lock);
return rc;
}
osd_oi_count = size_roundup_power2(osd_oi_count);
}
- cfs_mutex_init(&oi_init_lock);
+ mutex_init(&oi_init_lock);
return 0;
}
int rc;
ENTRY;
- cfs_down_write(&scrub->os_rwsem);
+ down_write(&scrub->os_rwsem);
if (flags & SS_SET_FAILOUT)
sf->sf_param |= SP_FAILOUT;
sf->sf_time_last_checkpoint = sf->sf_time_latest_start;
rc = osd_scrub_file_store(scrub);
if (rc == 0) {
- cfs_spin_lock(&scrub->os_lock);
+ spin_lock(&scrub->os_lock);
thread_set_flags(thread, SVC_RUNNING);
- cfs_spin_unlock(&scrub->os_lock);
+ spin_unlock(&scrub->os_lock);
cfs_waitq_broadcast(&thread->t_ctl_waitq);
}
- cfs_up_write(&scrub->os_rwsem);
+ up_write(&scrub->os_rwsem);
RETURN(rc);
}
struct osd_scrub *scrub = &dev->od_scrub;
struct scrub_file *sf = &scrub->os_file;
- cfs_down_write(&scrub->os_rwsem);
+ down_write(&scrub->os_rwsem);
scrub->os_new_checked++;
sf->sf_items_failed++;
if (sf->sf_pos_first_inconsistent == 0 ||
sf->sf_pos_first_inconsistent > lid->oii_ino)
sf->sf_pos_first_inconsistent = lid->oii_ino;
- cfs_up_write(&scrub->os_rwsem);
+ up_write(&scrub->os_rwsem);
return sf->sf_param & SP_FAILOUT ? rc : 0;
}
oii = cfs_list_entry(oic, struct osd_inconsistent_item,
oii_cache);
- cfs_down_write(&scrub->os_rwsem);
+ down_write(&scrub->os_rwsem);
scrub->os_new_checked++;
if (lid->oii_ino < sf->sf_pos_latest_start && oii == NULL)
GOTO(out, rc = 0);
}
/* Prevent the inode from being unlinked during OI scrub. */
- cfs_mutex_lock(&inode->i_mutex);
+ mutex_lock(&inode->i_mutex);
if (unlikely(inode->i_nlink == 0)) {
- cfs_mutex_unlock(&inode->i_mutex);
+ mutex_unlock(&inode->i_mutex);
iput(inode);
GOTO(out, rc = 0);
}
}
if (ops == DTO_INDEX_INSERT) {
- cfs_mutex_unlock(&inode->i_mutex);
+ mutex_unlock(&inode->i_mutex);
iput(inode);
}
- cfs_up_write(&scrub->os_rwsem);
+ up_write(&scrub->os_rwsem);
if (oii != NULL) {
LASSERT(!cfs_list_empty(&oii->oii_list));
- cfs_spin_lock(&scrub->os_lock);
+ spin_lock(&scrub->os_lock);
cfs_list_del_init(&oii->oii_list);
- cfs_spin_unlock(&scrub->os_lock);
+ spin_unlock(&scrub->os_lock);
OBD_FREE_PTR(oii);
}
RETURN(sf->sf_param & SP_FAILOUT ? rc : 0);
int rc;
ENTRY;
- cfs_down_write(&scrub->os_rwsem);
+ down_write(&scrub->os_rwsem);
sf->sf_items_checked += scrub->os_new_checked;
scrub->os_new_checked = 0;
sf->sf_pos_last_checkpoint = scrub->os_pos_current;
sf->sf_run_time += cfs_duration_sec(cfs_time_current() + HALF_SEC -
scrub->os_time_last_checkpoint);
rc = osd_scrub_file_store(scrub);
- cfs_up_write(&scrub->os_rwsem);
+ up_write(&scrub->os_rwsem);
RETURN(rc);
}
struct scrub_file *sf = &scrub->os_file;
ENTRY;
- cfs_down_write(&scrub->os_rwsem);
- cfs_spin_lock(&scrub->os_lock);
+ down_write(&scrub->os_rwsem);
+ spin_lock(&scrub->os_lock);
thread_set_flags(&scrub->os_thread, SVC_STOPPING);
- cfs_spin_unlock(&scrub->os_lock);
+ spin_unlock(&scrub->os_lock);
if (scrub->os_new_checked > 0) {
sf->sf_items_checked += scrub->os_new_checked;
scrub->os_new_checked = 0;
CERROR("%.16s: fail to osd_scrub_post, rc = %d\n",
LDISKFS_SB(osd_scrub2sb(scrub))->s_es->s_volume_name,
result);
- cfs_up_write(&scrub->os_rwsem);
+ up_write(&scrub->os_rwsem);
EXIT;
}
}
if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_CRASH)) {
- cfs_spin_lock(&scrub->os_lock);
+ spin_lock(&scrub->os_lock);
thread_set_flags(thread, SVC_STOPPING);
- cfs_spin_unlock(&scrub->os_lock);
+ spin_unlock(&scrub->os_lock);
return SCRUB_NEXT_CRASH;
}
}
if (items != NULL) {
- cfs_down_write(&scrub->os_rwsem);
+ down_write(&scrub->os_rwsem);
scrub->os_new_checked++;
(*items)++;
- cfs_up_write(&scrub->os_rwsem);
+ up_write(&scrub->os_rwsem);
goto next;
}
lu_env_fini(&env);
noenv:
- cfs_spin_lock(&scrub->os_lock);
+ spin_lock(&scrub->os_lock);
thread_set_flags(thread, SVC_STOPPED);
cfs_waitq_broadcast(&thread->t_ctl_waitq);
- cfs_spin_unlock(&scrub->os_lock);
+ spin_unlock(&scrub->os_lock);
return rc;
}
again:
/* os_lock: sync status between stop and scrub thread */
- cfs_spin_lock(&scrub->os_lock);
+ spin_lock(&scrub->os_lock);
if (thread_is_running(thread)) {
- cfs_spin_unlock(&scrub->os_lock);
+ spin_unlock(&scrub->os_lock);
RETURN(-EALREADY);
} else if (unlikely(thread_is_stopping(thread))) {
- cfs_spin_unlock(&scrub->os_lock);
+ spin_unlock(&scrub->os_lock);
l_wait_event(thread->t_ctl_waitq,
thread_is_stopped(thread),
&lwi);
goto again;
}
- cfs_spin_unlock(&scrub->os_lock);
+ spin_unlock(&scrub->os_lock);
if (scrub->os_file.sf_status == SS_COMPLETED)
flags |= SS_RESET;
ENTRY;
/* od_otable_mutex: prevent concurrent start/stop */
- cfs_mutex_lock(&dev->od_otable_mutex);
+ mutex_lock(&dev->od_otable_mutex);
rc = do_osd_scrub_start(dev, SS_AUTO);
- cfs_mutex_unlock(&dev->od_otable_mutex);
+ mutex_unlock(&dev->od_otable_mutex);
RETURN(rc == -EALREADY ? 0 : rc);
}
struct l_wait_info lwi = { 0 };
/* os_lock: sync status between stop and scrub thread */
- cfs_spin_lock(&scrub->os_lock);
+ spin_lock(&scrub->os_lock);
if (!thread_is_init(thread) && !thread_is_stopped(thread)) {
thread_set_flags(thread, SVC_STOPPING);
- cfs_spin_unlock(&scrub->os_lock);
+ spin_unlock(&scrub->os_lock);
cfs_waitq_broadcast(&thread->t_ctl_waitq);
l_wait_event(thread->t_ctl_waitq,
thread_is_stopped(thread),
&lwi);
/* Do not skip the last lock/unlock, which guarantees that the
* caller cannot return until the OI scrub thread has exited. */
- cfs_spin_lock(&scrub->os_lock);
+ spin_lock(&scrub->os_lock);
}
- cfs_spin_unlock(&scrub->os_lock);
+ spin_unlock(&scrub->os_lock);
}
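The final lock/unlock pair above is a barrier, not a data access: after the stopper has observed the stopped state it re-takes os_lock once, so it cannot return while the scrub thread is still inside its last critical section. A minimal userspace sketch of that idiom, assuming a pthread mutex, a C11 atomic flag, and sched_yield() standing in for l_wait_event():

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

static pthread_mutex_t os_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int thread_stopped;

/* Worker side: flag the exit while still inside the critical section. */
static void *scrub_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&os_lock);
	atomic_store(&thread_stopped, 1);	/* a waiter may see this right away */
	pthread_mutex_unlock(&os_lock);
	return NULL;
}

/* Stopper side: seeing the flag is not enough, the worker may still hold
 * os_lock; the trailing lock/unlock guarantees it has left that section. */
static void scrub_stop(void)
{
	while (!atomic_load(&thread_stopped))
		sched_yield();			/* stands in for l_wait_event() */
	pthread_mutex_lock(&os_lock);
	pthread_mutex_unlock(&os_lock);
}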
static void osd_scrub_stop(struct osd_device *dev)
{
/* od_otable_mutex: prevent concurrent start/stop */
- cfs_mutex_lock(&dev->od_otable_mutex);
+ mutex_lock(&dev->od_otable_mutex);
dev->od_scrub.os_paused = 1;
do_osd_scrub_stop(&dev->od_scrub);
- cfs_mutex_unlock(&dev->od_otable_mutex);
+ mutex_unlock(&dev->od_otable_mutex);
}
static const char osd_scrub_name[] = "OI_scrub";
ctxt->fs = get_ds();
cfs_waitq_init(&scrub->os_thread.t_ctl_waitq);
- cfs_init_rwsem(&scrub->os_rwsem);
- cfs_spin_lock_init(&scrub->os_lock);
+ init_rwsem(&scrub->os_rwsem);
+ spin_lock_init(&scrub->os_lock);
CFS_INIT_LIST_HEAD(&scrub->os_inconsistent_items);
push_ctxt(&saved, ctxt, NULL);
ENTRY;
/* od_otable_mutex: prevent concurrent init/fini */
- cfs_mutex_lock(&dev->od_otable_mutex);
+ mutex_lock(&dev->od_otable_mutex);
if (dev->od_otable_it != NULL)
GOTO(out, it = ERR_PTR(-EALREADY));
GOTO(out, it);
out:
- cfs_mutex_unlock(&dev->od_otable_mutex);
+ mutex_unlock(&dev->od_otable_mutex);
return (struct dt_it *)it;
}
struct osd_device *dev = it->ooi_dev;
/* od_otable_mutex: prevent concurrent init/fini */
- cfs_mutex_lock(&dev->od_otable_mutex);
+ mutex_lock(&dev->od_otable_mutex);
do_osd_scrub_stop(&dev->od_scrub);
LASSERT(dev->od_otable_it == it);
dev->od_otable_it = NULL;
- cfs_mutex_unlock(&dev->od_otable_mutex);
+ mutex_unlock(&dev->od_otable_mutex);
OBD_FREE_PTR(it);
}
struct osd_device *dev = ((struct osd_otable_it *)di)->ooi_dev;
/* od_otable_mutex: prevent concurrent init/fini */
- cfs_mutex_lock(&dev->od_otable_mutex);
+ mutex_lock(&dev->od_otable_mutex);
dev->od_scrub.os_paused = 1;
- cfs_mutex_unlock(&dev->od_otable_mutex);
+ mutex_unlock(&dev->od_otable_mutex);
}
/**
oii->oii_cache = *oic;
oii->oii_insert = insert;
- cfs_spin_lock(&scrub->os_lock);
+ spin_lock(&scrub->os_lock);
if (unlikely(!thread_is_running(thread))) {
- cfs_spin_unlock(&scrub->os_lock);
+ spin_unlock(&scrub->os_lock);
OBD_FREE_PTR(oii);
RETURN(-EAGAIN);
}
if (cfs_list_empty(&scrub->os_inconsistent_items))
wakeup = 1;
cfs_list_add_tail(&oii->oii_list, &scrub->os_inconsistent_items);
- cfs_spin_unlock(&scrub->os_lock);
+ spin_unlock(&scrub->os_lock);
if (wakeup != 0)
cfs_waitq_broadcast(&thread->t_ctl_waitq);
struct osd_inconsistent_item *oii;
ENTRY;
- cfs_spin_lock(&scrub->os_lock);
+ spin_lock(&scrub->os_lock);
cfs_list_for_each_entry(oii, &scrub->os_inconsistent_items, oii_list) {
if (lu_fid_eq(fid, &oii->oii_cache.oic_fid)) {
*id = oii->oii_cache.oic_lid;
- cfs_spin_unlock(&scrub->os_lock);
+ spin_unlock(&scrub->os_lock);
RETURN(0);
}
}
- cfs_spin_unlock(&scrub->os_lock);
+ spin_unlock(&scrub->os_lock);
RETURN(-ENOENT);
}
int ret = -ENOSPC;
int rc;
- cfs_down_read(&scrub->os_rwsem);
+ down_read(&scrub->os_rwsem);
rc = snprintf(buf, len,
"name: OI scrub\n"
"magic: 0x%x\n"
ret = save - len;
out:
- cfs_up_read(&scrub->os_rwsem);
+ up_read(&scrub->os_rwsem);
return ret;
}
/* write lock for scrub prep/update/post/checkpoint,
* read lock for scrub dump. */
- cfs_rw_semaphore_t os_rwsem;
- cfs_spinlock_t os_lock;
+ struct rw_semaphore os_rwsem;
+ spinlock_t os_lock;
/* Scrub file in memory. */
struct scrub_file os_file;
oh->ot_tx = tx;
CFS_INIT_LIST_HEAD(&oh->ot_dcb_list);
CFS_INIT_LIST_HEAD(&oh->ot_sa_list);
- cfs_sema_init(&oh->ot_sa_lock, 1);
+ sema_init(&oh->ot_sa_lock, 1);
memset(&oh->ot_quota_trans, 0, sizeof(oh->ot_quota_trans));
th = &oh->ot_super;
th->th_dev = dt;
*exp = class_conn2export(&conn);
- cfs_spin_lock(&osd->od_objset.lock);
+ spin_lock(&osd->od_objset.lock);
osd->od_connects++;
- cfs_spin_unlock(&osd->od_objset.lock);
+ spin_unlock(&osd->od_objset.lock);
RETURN(0);
}
ENTRY;
/* Only disconnect the underlying layers on the final disconnect. */
- cfs_spin_lock(&osd->od_objset.lock);
+ spin_lock(&osd->od_objset.lock);
osd->od_connects--;
if (osd->od_connects == 0)
release = 1;
- cfs_spin_unlock(&osd->od_objset.lock);
+ spin_unlock(&osd->od_objset.lock);
rc = class_disconnect(exp); /* bz 9811 */
struct thandle ot_super;
cfs_list_t ot_dcb_list;
cfs_list_t ot_sa_list;
- cfs_semaphore_t ot_sa_lock;
+ struct semaphore ot_sa_lock;
dmu_tx_t *ot_tx;
struct lquota_trans ot_quota_trans;
__u32 ot_write_commit:1,
nvlist_t *oo_sa_xattr;
cfs_list_t oo_sa_linkage;
- cfs_rw_semaphore_t oo_sem;
+ struct rw_semaphore oo_sem;
/* cached attributes */
- cfs_rwlock_t oo_attr_lock;
+ rwlock_t oo_attr_lock;
struct lu_attr oo_attr;
/* protects extended attributes */
- cfs_semaphore_t oo_guard;
+ struct semaphore oo_guard;
uint64_t oo_xattr;
/* record size for index file */
LASSERT(dt_object_exists(dt));
LASSERT(obj->oo_db);
- cfs_read_lock(&obj->oo_attr_lock);
+ read_lock(&obj->oo_attr_lock);
old_size = obj->oo_attr.la_size;
- cfs_read_unlock(&obj->oo_attr_lock);
+ read_unlock(&obj->oo_attr_lock);
if (*pos + size > old_size) {
if (old_size < *pos)
dmu_write(osd->od_objset.os, obj->oo_db->db_object, offset,
(uint64_t)buf->lb_len, buf->lb_buf, oh->ot_tx);
- cfs_write_lock(&obj->oo_attr_lock);
+ write_lock(&obj->oo_attr_lock);
if (obj->oo_attr.la_size < offset + buf->lb_len) {
obj->oo_attr.la_size = offset + buf->lb_len;
- cfs_write_unlock(&obj->oo_attr_lock);
+ write_unlock(&obj->oo_attr_lock);
/* osd_object_sa_update() will be copying directly from oo_attr
* into dbuf. Any update within a single txg will copy the
* most recent value. */
if (unlikely(rc))
GOTO(out, rc);
} else {
- cfs_write_unlock(&obj->oo_attr_lock);
+ write_unlock(&obj->oo_attr_lock);
}
*pos += buf->lb_len;
RETURN(0);
}
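The size update above follows the rule in the comment: la_size is grown in the cached oo_attr under the write lock, the lock is dropped, and only then is the cached field flushed via osd_object_sa_update(), which copies whatever value oo_attr holds at that moment, so within one txg the newest size always wins. A minimal userspace sketch of that grow-then-flush idiom, assuming a pthread rwlock and a hypothetical flush() callback:

#include <pthread.h>

struct cached_attr {
	pthread_rwlock_t lock;	/* plays the role of oo_attr_lock */
	unsigned long	 size;	/* plays the role of oo_attr.la_size */
};

/* Grow the cached size, drop the lock, then flush the cached field.  The
 * flush reads whatever value the cache holds at that moment, so a racing,
 * larger update can only make the persisted value newer, never stale. */
static void grow_and_flush(struct cached_attr *a, unsigned long new_size,
			   void (*flush)(const unsigned long *size))
{
	pthread_rwlock_wrlock(&a->lock);
	if (a->size < new_size) {
		a->size = new_size;
		pthread_rwlock_unlock(&a->lock);
		flush(&a->size);	/* stands in for osd_object_sa_update() */
	} else {
		pthread_rwlock_unlock(&a->lock);
	}
}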
- cfs_write_lock(&obj->oo_attr_lock);
+ write_lock(&obj->oo_attr_lock);
if (obj->oo_attr.la_size < new_size) {
obj->oo_attr.la_size = new_size;
- cfs_write_unlock(&obj->oo_attr_lock);
+ write_unlock(&obj->oo_attr_lock);
/* osd_object_sa_update() will be copying directly from
* oo_attr into dbuf. Any update within a single txg will copy
* the most recent value. */
rc = osd_object_sa_update(obj, SA_ZPL_SIZE(uos),
&obj->oo_attr.la_size, 8, oh);
} else {
- cfs_write_unlock(&obj->oo_attr_lock);
+ write_unlock(&obj->oo_attr_lock);
}
RETURN(rc);
LASSERT(th != NULL);
oh = container_of0(th, struct osd_thandle, ot_super);
- cfs_write_lock(&obj->oo_attr_lock);
+ write_lock(&obj->oo_attr_lock);
/* truncate */
if (end == OBD_OBJECT_EOF || end >= obj->oo_attr.la_size)
len = DMU_OBJECT_END;
else
len = end - start;
- cfs_write_unlock(&obj->oo_attr_lock);
+ write_unlock(&obj->oo_attr_lock);
rc = __osd_object_punch(osd->od_objset.os, obj->oo_db, oh->ot_tx,
obj->oo_attr.la_size, start, len);
/* set new size */
if (len == DMU_OBJECT_END) {
- cfs_write_lock(&obj->oo_attr_lock);
+ write_lock(&obj->oo_attr_lock);
obj->oo_attr.la_size = start;
- cfs_write_unlock(&obj->oo_attr_lock);
+ write_unlock(&obj->oo_attr_lock);
rc = osd_object_sa_update(obj, SA_ZPL_SIZE(uos),
&obj->oo_attr.la_size, 8, oh);
}
oh = container_of0(handle, struct osd_thandle, ot_super);
- cfs_read_lock(&obj->oo_attr_lock);
+ read_lock(&obj->oo_attr_lock);
if (end == OBD_OBJECT_EOF || end >= obj->oo_attr.la_size)
len = DMU_OBJECT_END;
else
/* declare we'll free some blocks ... */
if (start < obj->oo_attr.la_size) {
- cfs_read_unlock(&obj->oo_attr_lock);
+ read_unlock(&obj->oo_attr_lock);
dmu_tx_hold_free(oh->ot_tx, obj->oo_db->db_object, start, len);
} else {
- cfs_read_unlock(&obj->oo_attr_lock);
+ read_unlock(&obj->oo_attr_lock);
}
/* ... and we'll modify size attribute */
if (!cfs_list_empty(&obj->oo_sa_linkage))
return;
- cfs_down(&oh->ot_sa_lock);
- cfs_write_lock(&obj->oo_attr_lock);
+ down(&oh->ot_sa_lock);
+ write_lock(&obj->oo_attr_lock);
if (likely(cfs_list_empty(&obj->oo_sa_linkage)))
cfs_list_add(&obj->oo_sa_linkage, &oh->ot_sa_list);
- cfs_write_unlock(&obj->oo_attr_lock);
- cfs_up(&oh->ot_sa_lock);
+ write_unlock(&obj->oo_attr_lock);
+ up(&oh->ot_sa_lock);
}
/*
{
struct osd_object *obj;
- cfs_down(&oh->ot_sa_lock);
+ down(&oh->ot_sa_lock);
while (!cfs_list_empty(&oh->ot_sa_list)) {
obj = cfs_list_entry(oh->ot_sa_list.next,
struct osd_object, oo_sa_linkage);
sa_spill_rele(obj->oo_sa_hdl);
- cfs_write_lock(&obj->oo_attr_lock);
+ write_lock(&obj->oo_attr_lock);
cfs_list_del_init(&obj->oo_sa_linkage);
- cfs_write_unlock(&obj->oo_attr_lock);
+ write_unlock(&obj->oo_attr_lock);
}
- cfs_up(&oh->ot_sa_lock);
+ up(&oh->ot_sa_lock);
}
/*
mo->oo_dt.do_ops = &osd_obj_ops;
l->lo_ops = &osd_lu_obj_ops;
CFS_INIT_LIST_HEAD(&mo->oo_sa_linkage);
- cfs_init_rwsem(&mo->oo_sem);
- cfs_sema_init(&mo->oo_guard, 1);
- cfs_rwlock_init(&mo->oo_attr_lock);
+ init_rwsem(&mo->oo_sem);
+ sema_init(&mo->oo_guard, 1);
+ rwlock_init(&mo->oo_attr_lock);
return l;
} else {
return NULL;
int __osd_object_free(udmu_objset_t *uos, uint64_t oid, dmu_tx_t *tx)
{
LASSERT(uos->objects != 0);
- cfs_spin_lock(&uos->lock);
+ spin_lock(&uos->lock);
uos->objects--;
- cfs_spin_unlock(&uos->lock);
+ spin_unlock(&uos->lock);
return -dmu_object_free(uos->os, oid, tx);
}
LASSERT(osd_invariant(obj));
- cfs_down_read(&obj->oo_sem);
+ down_read(&obj->oo_sem);
}
static void osd_object_write_lock(const struct lu_env *env,
LASSERT(osd_invariant(obj));
- cfs_down_write(&obj->oo_sem);
+ down_write(&obj->oo_sem);
}
static void osd_object_read_unlock(const struct lu_env *env,
struct osd_object *obj = osd_dt_obj(dt);
LASSERT(osd_invariant(obj));
- cfs_up_read(&obj->oo_sem);
+ up_read(&obj->oo_sem);
}
static void osd_object_write_unlock(const struct lu_env *env,
struct osd_object *obj = osd_dt_obj(dt);
LASSERT(osd_invariant(obj));
- cfs_up_write(&obj->oo_sem);
+ up_write(&obj->oo_sem);
}
static int osd_object_write_locked(const struct lu_env *env,
LASSERT(osd_invariant(obj));
- if (cfs_down_write_trylock(&obj->oo_sem)) {
+ if (down_write_trylock(&obj->oo_sem)) {
rc = 0;
- cfs_up_write(&obj->oo_sem);
+ up_write(&obj->oo_sem);
}
return rc;
}
LASSERT(osd_invariant(obj));
LASSERT(obj->oo_db);
- cfs_read_lock(&obj->oo_attr_lock);
+ read_lock(&obj->oo_attr_lock);
*attr = obj->oo_attr;
- cfs_read_unlock(&obj->oo_attr_lock);
+ read_unlock(&obj->oo_attr_lock);
/* with ZFS_DEBUG zrl_add_debug() called by DB_DNODE_ENTER()
* from within sa_object_size() can block on a mutex, so
obj->oo_attr.la_gid, rc);
}
- cfs_write_lock(&obj->oo_attr_lock);
+ write_lock(&obj->oo_attr_lock);
cnt = 0;
if (la->la_valid & LA_ATIME) {
osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
&osa->gid, 8);
}
obj->oo_attr.la_valid |= la->la_valid;
- cfs_write_unlock(&obj->oo_attr_lock);
+ write_unlock(&obj->oo_attr_lock);
rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);
int rc;
LASSERT(tag);
- cfs_spin_lock(&uos->lock);
+ spin_lock(&uos->lock);
uos->objects++;
- cfs_spin_unlock(&uos->lock);
+ spin_unlock(&uos->lock);
/* Assert that the transaction has been assigned to a
transaction group. */
LASSERT(tag);
- cfs_spin_lock(&uos->lock);
+ spin_lock(&uos->lock);
uos->objects++;
- cfs_spin_unlock(&uos->lock);
+ spin_unlock(&uos->lock);
/* Assert that the transaction has been assigned to a
transaction group. */
/* concurrent create declarations should not see the object in an
* inconsistent state (db, attr, etc).
* In regular cases acquisition should be cheap. */
- cfs_down(&obj->oo_guard);
+ down(&obj->oo_guard);
LASSERT(osd_invariant(obj));
LASSERT(!dt_object_exists(dt));
}
out:
- cfs_up(&obj->oo_guard);
+ up(&obj->oo_guard);
RETURN(rc);
}
oh = container_of0(handle, struct osd_thandle, ot_super);
- cfs_write_lock(&obj->oo_attr_lock);
+ write_lock(&obj->oo_attr_lock);
nlink = ++obj->oo_attr.la_nlink;
- cfs_write_unlock(&obj->oo_attr_lock);
+ write_unlock(&obj->oo_attr_lock);
rc = osd_object_sa_update(obj, SA_ZPL_LINKS(uos), &nlink, 8, oh);
return rc;
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
- cfs_write_lock(&obj->oo_attr_lock);
+ write_lock(&obj->oo_attr_lock);
nlink = --obj->oo_attr.la_nlink;
- cfs_write_unlock(&obj->oo_attr_lock);
+ write_unlock(&obj->oo_attr_lock);
rc = osd_object_sa_update(obj, SA_ZPL_LINKS(uos), &nlink, 8, oh);
return rc;
RETURN(rc);
}
- cfs_spin_lock(&capa_lock);
+ spin_lock(&capa_lock);
for (i = 0; i < 2; i++) {
if (keys[i].lk_keyid == capa->lc_keyid) {
oti->oti_capa_key = keys[i];
break;
}
}
- cfs_spin_unlock(&capa_lock);
+ spin_unlock(&capa_lock);
if (i == 2) {
DEBUG_CAPA(D_ERROR, capa, "no matched capa key");
RETURN(oc);
}
- cfs_spin_lock(&capa_lock);
+ spin_lock(&capa_lock);
*key = dev->od_capa_keys[1];
- cfs_spin_unlock(&capa_lock);
+ spin_unlock(&capa_lock);
capa->lc_keyid = key->lk_keyid;
capa->lc_expiry = cfs_time_current_sec() + dev->od_capa_timeout;
LASSERT(osd_invariant(obj));
LASSERT(dt_object_exists(dt));
- cfs_down(&obj->oo_guard);
+ down(&obj->oo_guard);
rc = __osd_xattr_get(env, obj, buf, name, &size);
- cfs_up(&obj->oo_guard);
+ up(&obj->oo_guard);
if (rc == -ENOENT)
rc = -ENODATA;
LASSERT(handle != NULL);
oh = container_of0(handle, struct osd_thandle, ot_super);
- cfs_down(&obj->oo_guard);
+ down(&obj->oo_guard);
__osd_xattr_declare_set(env, obj, buf->lb_len, name, oh);
- cfs_up(&obj->oo_guard);
+ up(&obj->oo_guard);
RETURN(0);
}
oh = container_of0(handle, struct osd_thandle, ot_super);
- cfs_down(&obj->oo_guard);
+ down(&obj->oo_guard);
CDEBUG(D_INODE, "Setting xattr %s with size %d\n",
name, (int)buf->lb_len);
rc = osd_xattr_set_internal(env, obj, buf, name, fl, oh, capa);
- cfs_up(&obj->oo_guard);
+ up(&obj->oo_guard);
RETURN(rc);
}
LASSERT(oh->ot_tx != NULL);
LASSERT(obj->oo_db != NULL);
- cfs_down(&obj->oo_guard);
+ down(&obj->oo_guard);
__osd_xattr_declare_del(env, obj, name, oh);
- cfs_up(&obj->oo_guard);
+ up(&obj->oo_guard);
RETURN(0);
}
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_tx != NULL);
- cfs_down(&obj->oo_guard);
+ down(&obj->oo_guard);
rc = __osd_xattr_del(env, obj, name, oh);
- cfs_up(&obj->oo_guard);
+ up(&obj->oo_guard);
RETURN(rc);
}
LASSERT(osd_invariant(obj));
LASSERT(dt_object_exists(dt));
- cfs_down(&obj->oo_guard);
+ down(&obj->oo_guard);
rc = osd_sa_xattr_list(env, obj, lb);
if (rc < 0)
out_fini:
udmu_zap_cursor_fini(zc);
out:
- cfs_up(&obj->oo_guard);
+ up(&obj->oo_guard);
RETURN(rc);
}
dmu_objset_space(uos->os, &refdbytes, &availbytes, &usedobjs,
&availobjs);
uos->objects = usedobjs;
- cfs_spin_lock_init(&uos->lock);
+ spin_lock_init(&uos->lock);
out:
if (error && uos->os != NULL)
typedef struct udmu_objset {
struct objset *os;
uint64_t root; /* id of root znode */
- cfs_spinlock_t lock; /* protects objects below */
+ spinlock_t lock; /* protects objects below */
uint64_t objects; /* in-core counter of objects */
/* SA attr mapping->id,
* name is the same as in ZFS to use defines SA_ZPL_...*/
* of the cleanup RPCs fails (e.g. ldlm cancel, etc). We don't
* fully deactivate the import, or that would drop all requests. */
LASSERT(imp != NULL);
- cfs_spin_lock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
imp->imp_deactive = 1;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_unlock(&imp->imp_lock);
ptlrpc_deactivate_import(imp);
* layer above osp (usually lod) can use ffree to estimate
* how many objects are available for immediate creation
*/
- cfs_spin_lock(&d->opd_pre_lock);
+ spin_lock(&d->opd_pre_lock);
sfs->os_fprecreated = d->opd_pre_last_created - d->opd_pre_used_id;
sfs->os_fprecreated -= d->opd_pre_reserved;
- cfs_spin_unlock(&d->opd_pre_lock);
+ spin_unlock(&d->opd_pre_lock);
LASSERT(sfs->os_fprecreated <= OST_MAX_PRECREATE * 2);
/* Since the request might also come from lprocfs, we need to
* sync this with client_disconnect_export Bug15684 */
- cfs_down_read(&exp->exp_obd->u.cli.cl_sem);
+ down_read(&exp->exp_obd->u.cli.cl_sem);
if (exp->exp_obd->u.cli.cl_import)
imp = class_import_get(exp->exp_obd->u.cli.cl_import);
- cfs_up_read(&exp->exp_obd->u.cli.cl_sem);
+ up_read(&exp->exp_obd->u.cli.cl_sem);
if (!imp)
RETURN(-ENODEV);
* Infrastructure to support tracking of last committed llog record
*/
struct osp_id_tracker {
- cfs_spinlock_t otr_lock;
+ spinlock_t otr_lock;
__u32 otr_next_id;
__u32 otr_committed_id;
/* callback is registered once per diskfs -- that's the whole point */
/*
* Precreation pool
*/
- cfs_spinlock_t opd_pre_lock;
+ spinlock_t opd_pre_lock;
/* last id assigned in creation */
__u64 opd_pre_used_id;
/* last created id OST reported, next-created - available id's */
/*
* OST synchronization
*/
- cfs_spinlock_t opd_syn_lock;
+ spinlock_t opd_syn_lock;
/* unique generation, to recognize start of new records in the llog */
struct llog_gen opd_syn_generation;
/* number of changes to sync, used to wake up sync thread */
th);
} else {
/* not needed in the cache anymore */
- cfs_set_bit(LU_OBJECT_HEARD_BANSHEE,
+ set_bit(LU_OBJECT_HEARD_BANSHEE,
&dt->do_lu.lo_header->loh_flags);
}
RETURN(rc);
rc = fid_ostid_pack(lu_object_fid(&dt->do_lu), &osi->osi_oi);
LASSERT(rc == 0);
osi->osi_id = ostid_id(&osi->osi_oi);
- cfs_spin_lock(&d->opd_pre_lock);
+ spin_lock(&d->opd_pre_lock);
osp_update_last_id(d, osi->osi_id);
- cfs_spin_unlock(&d->opd_pre_lock);
+ spin_unlock(&d->opd_pre_lock);
}
LASSERT(osi->osi_id);
/* we might have lost precreated objects */
if (unlikely(d->opd_gap_count) > 0) {
- cfs_spin_lock(&d->opd_pre_lock);
+ spin_lock(&d->opd_pre_lock);
if (d->opd_gap_count > 0) {
int count = d->opd_gap_count;
osi->osi_oi.oi_id = d->opd_gap_start;
d->opd_gap_count = 0;
- cfs_spin_unlock(&d->opd_pre_lock);
+ spin_unlock(&d->opd_pre_lock);
CDEBUG(D_HA, "Found gap "LPU64"+%d in objids\n",
d->opd_gap_start, count);
/* real gap handling is disabled until ORI-692 is fixed;
* for now we only report gaps */
} else {
- cfs_spin_unlock(&d->opd_pre_lock);
+ spin_unlock(&d->opd_pre_lock);
}
}
rc = osp_sync_add(env, o, MDS_UNLINK64_REC, th, NULL);
/* not needed in cache any more */
- cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
+ set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
RETURN(rc);
}
*/
if (unlikely(po->opo_reserved)) {
LASSERT(d->opd_pre_reserved > 0);
- cfs_spin_lock(&d->opd_pre_lock);
+ spin_lock(&d->opd_pre_lock);
d->opd_pre_reserved--;
- cfs_spin_unlock(&d->opd_pre_lock);
+ spin_unlock(&d->opd_pre_lock);
/* not needed in cache any more */
- cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
+ set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
}
EXIT;
}
int rc;
/* XXX: do we really need locking here? */
- cfs_spin_lock(&d->opd_pre_lock);
+ spin_lock(&d->opd_pre_lock);
rc = osp_precreate_near_empty_nolock(d);
- cfs_spin_unlock(&d->opd_pre_lock);
+ spin_unlock(&d->opd_pre_lock);
return rc;
}
RETURN(rc);
}
- cfs_spin_lock(&d->opd_pre_lock);
+ spin_lock(&d->opd_pre_lock);
if (d->opd_pre_grow_count > d->opd_pre_max_grow_count / 2)
d->opd_pre_grow_count = d->opd_pre_max_grow_count / 2;
grow = d->opd_pre_grow_count;
- cfs_spin_unlock(&d->opd_pre_lock);
+ spin_unlock(&d->opd_pre_lock);
body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
LASSERT(body);
diff = body->oa.o_id - d->opd_pre_last_created;
- cfs_spin_lock(&d->opd_pre_lock);
+ spin_lock(&d->opd_pre_lock);
if (diff < grow) {
/* the OST has not managed to create all the
* objects we asked for */
d->opd_pre_grow_slow = 0;
}
d->opd_pre_last_created = body->oa.o_id;
- cfs_spin_unlock(&d->opd_pre_lock);
+ spin_unlock(&d->opd_pre_lock);
CDEBUG(D_OTHER, "current precreated pool: %llu-%llu\n",
d->opd_pre_used_id, d->opd_pre_last_created);
/*
* the OST provides us with the id the new pool starts from in body->oa.o_id
*/
- cfs_spin_lock(&d->opd_pre_lock);
+ spin_lock(&d->opd_pre_lock);
if (le64_to_cpu(d->opd_last_used_id) > body->oa.o_id) {
d->opd_pre_grow_count = OST_MIN_PRECREATE +
le64_to_cpu(d->opd_last_used_id) -
}
d->opd_pre_used_id = d->opd_pre_last_created;
d->opd_pre_grow_slow = 0;
- cfs_spin_unlock(&d->opd_pre_lock);
+ spin_unlock(&d->opd_pre_lock);
CDEBUG(D_HA, "%s: Got last_id "LPU64" from OST, last_used is "LPU64
", pre_used "LPU64"\n", d->opd_obd->obd_name, body->oa.o_id,
sprintf(pname, "osp-pre-%u\n", d->opd_index);
cfs_daemonize(pname);
- cfs_spin_lock(&d->opd_pre_lock);
+ spin_lock(&d->opd_pre_lock);
thread->t_flags = SVC_RUNNING;
- cfs_spin_unlock(&d->opd_pre_lock);
+ spin_unlock(&d->opd_pre_lock);
cfs_waitq_signal(&thread->t_ctl_waitq);
while (osp_precreate_running(d)) {
d->opd_pre_grow_slow == 0 &&
(d->opd_pre_last_created - d->opd_pre_used_id <=
d->opd_pre_grow_count / 4 + 1)) {
- cfs_spin_lock(&d->opd_pre_lock);
+ spin_lock(&d->opd_pre_lock);
d->opd_pre_grow_slow = 1;
d->opd_pre_grow_count *= 2;
- cfs_spin_unlock(&d->opd_pre_lock);
+ spin_unlock(&d->opd_pre_lock);
}
- cfs_spin_lock(&d->opd_pre_lock);
+ spin_lock(&d->opd_pre_lock);
precreated = d->opd_pre_last_created - d->opd_pre_used_id;
if (precreated > d->opd_pre_reserved) {
d->opd_pre_reserved++;
- cfs_spin_unlock(&d->opd_pre_lock);
+ spin_unlock(&d->opd_pre_lock);
rc = 0;
/* XXX: don't wake up if precreation is in progress */
break;
}
- cfs_spin_unlock(&d->opd_pre_lock);
+ spin_unlock(&d->opd_pre_lock);
/*
* all precreated objects have been used and no-space
obd_id objid;
/* grab next id from the pool */
- cfs_spin_lock(&d->opd_pre_lock);
+ spin_lock(&d->opd_pre_lock);
LASSERT(d->opd_pre_used_id < d->opd_pre_last_created);
objid = ++d->opd_pre_used_id;
d->opd_pre_reserved--;
* we might miscalculate the gap, causing object loss or a leak
*/
osp_update_last_id(d, objid);
- cfs_spin_unlock(&d->opd_pre_lock);
+ spin_unlock(&d->opd_pre_lock);
/*
* probably main thread suspended orphan cleanup till
d->opd_pre_min_grow_count = OST_MIN_PRECREATE;
d->opd_pre_max_grow_count = OST_MAX_PRECREATE;
- cfs_spin_lock_init(&d->opd_pre_lock);
+ spin_lock_init(&d->opd_pre_lock);
cfs_waitq_init(&d->opd_pre_waitq);
cfs_waitq_init(&d->opd_pre_user_waitq);
cfs_waitq_init(&d->opd_pre_thread.t_ctl_waitq);
rc = 0;
if (likely(rc == 0)) {
- cfs_spin_lock(&d->opd_syn_lock);
+ spin_lock(&d->opd_syn_lock);
d->opd_syn_changes++;
- cfs_spin_unlock(&d->opd_syn_lock);
+ spin_unlock(&d->opd_syn_lock);
}
RETURN(rc);
/* this request was aborted by the shutdown procedure,
* not committed by the peer. we should preserve llog
* record */
- cfs_spin_lock(&d->opd_syn_lock);
+ spin_lock(&d->opd_syn_lock);
d->opd_syn_rpc_in_progress--;
- cfs_spin_unlock(&d->opd_syn_lock);
+ spin_unlock(&d->opd_syn_lock);
cfs_waitq_signal(&d->opd_syn_waitq);
return;
}
ptlrpc_request_addref(req);
- cfs_spin_lock(&d->opd_syn_lock);
+ spin_lock(&d->opd_syn_lock);
cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
- cfs_spin_unlock(&d->opd_syn_lock);
+ spin_unlock(&d->opd_syn_lock);
/* XXX: some batching wouldn't hurt */
cfs_waitq_signal(&d->opd_syn_waitq);
ptlrpc_request_addref(req);
- cfs_spin_lock(&d->opd_syn_lock);
+ spin_lock(&d->opd_syn_lock);
cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
- cfs_spin_unlock(&d->opd_syn_lock);
+ spin_unlock(&d->opd_syn_lock);
cfs_waitq_signal(&d->opd_syn_waitq);
} else if (rc) {
/* this is the last time we see the request.
* If transno is not zero, then the commit cb
* will be called at some point. */
- cfs_spin_lock(&d->opd_syn_lock);
+ spin_lock(&d->opd_syn_lock);
d->opd_syn_rpc_in_progress--;
- cfs_spin_unlock(&d->opd_syn_lock);
+ spin_unlock(&d->opd_syn_lock);
}
cfs_waitq_signal(&d->opd_syn_waitq);
}
LASSERT(d->opd_syn_rpc_in_flight > 0);
- cfs_spin_lock(&d->opd_syn_lock);
+ spin_lock(&d->opd_syn_lock);
d->opd_syn_rpc_in_flight--;
- cfs_spin_unlock(&d->opd_syn_lock);
+ spin_unlock(&d->opd_syn_lock);
CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
d->opd_syn_rpc_in_progress);
/* note that we increment the counters before sending the RPC, to stay
* consistent with the RPC interpret callback, which may run very quickly */
- cfs_spin_lock(&d->opd_syn_lock);
+ spin_lock(&d->opd_syn_lock);
d->opd_syn_rpc_in_flight++;
d->opd_syn_rpc_in_progress++;
- cfs_spin_unlock(&d->opd_syn_lock);
+ spin_unlock(&d->opd_syn_lock);
switch (rec->lrh_type) {
/* case MDS_UNLINK_REC is kept for compatibility */
}
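The comment above captures an ordering rule for asynchronous completion: the in-flight/in-progress counters must be bumped before the RPC is handed off, because the interpret callback that decrements them can run almost immediately on another CPU. A minimal userspace sketch of the same rule, assuming C11 atomics and a detached pthread standing in for the RPC completion:

#include <pthread.h>
#include <stdatomic.h>

static atomic_int rpc_in_flight;

/* Completion callback: may run almost immediately after the hand-off. */
static void *rpc_interpret(void *arg)
{
	(void)arg;
	atomic_fetch_sub(&rpc_in_flight, 1);
	return NULL;
}

/* Sender: increment *before* the request becomes visible to the callback,
 * otherwise a fast completion could decrement a counter that was never
 * incremented and the accounting would go negative. */
static int rpc_send(void)
{
	pthread_t tid;

	atomic_fetch_add(&rpc_in_flight, 1);
	if (pthread_create(&tid, NULL, rpc_interpret, NULL) != 0) {
		atomic_fetch_sub(&rpc_in_flight, 1);	/* roll back on failure */
		return -1;
	}
	pthread_detach(tid);
	return 0;
}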
if (likely(rc == 0)) {
- cfs_spin_lock(&d->opd_syn_lock);
+ spin_lock(&d->opd_syn_lock);
if (d->opd_syn_prev_done) {
LASSERT(d->opd_syn_changes > 0);
LASSERT(rec->lrh_id <= d->opd_syn_last_committed_id);
CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
d->opd_syn_rpc_in_progress);
- cfs_spin_unlock(&d->opd_syn_lock);
+ spin_unlock(&d->opd_syn_lock);
} else {
- cfs_spin_lock(&d->opd_syn_lock);
+ spin_lock(&d->opd_syn_lock);
d->opd_syn_rpc_in_flight--;
d->opd_syn_rpc_in_progress--;
- cfs_spin_unlock(&d->opd_syn_lock);
+ spin_unlock(&d->opd_syn_lock);
}
CDEBUG(D_HA, "found record %x, %d, idx %u, id %u: %d\n",
LASSERT(llh);
CFS_INIT_LIST_HEAD(&list);
- cfs_spin_lock(&d->opd_syn_lock);
+ spin_lock(&d->opd_syn_lock);
cfs_list_splice(&d->opd_syn_committed_there, &list);
CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there);
- cfs_spin_unlock(&d->opd_syn_lock);
+ spin_unlock(&d->opd_syn_lock);
cfs_list_for_each_entry_safe(req, tmp, &list, rq_exp_list) {
LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
llog_ctxt_put(ctxt);
LASSERT(d->opd_syn_rpc_in_progress >= done);
- cfs_spin_lock(&d->opd_syn_lock);
+ spin_lock(&d->opd_syn_lock);
d->opd_syn_rpc_in_progress -= done;
- cfs_spin_unlock(&d->opd_syn_lock);
+ spin_unlock(&d->opd_syn_lock);
CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
d->opd_syn_rpc_in_progress);
sprintf(pname, "osp-syn-%u\n", d->opd_index);
cfs_daemonize(pname);
- cfs_spin_lock(&d->opd_syn_lock);
+ spin_lock(&d->opd_syn_lock);
thread->t_flags = SVC_RUNNING;
- cfs_spin_unlock(&d->opd_syn_lock);
+ spin_unlock(&d->opd_syn_lock);
cfs_waitq_signal(&thread->t_ctl_waitq);
ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
*/
d->opd_syn_max_rpc_in_flight = OSP_MAX_IN_FLIGHT;
d->opd_syn_max_rpc_in_progress = OSP_MAX_IN_PROGRESS;
- cfs_spin_lock_init(&d->opd_syn_lock);
+ spin_lock_init(&d->opd_syn_lock);
cfs_waitq_init(&d->opd_syn_waitq);
cfs_waitq_init(&d->opd_syn_thread.t_ctl_waitq);
CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there);
RETURN(0);
}
-static CFS_DEFINE_MUTEX(osp_id_tracker_sem);
+static DEFINE_MUTEX(osp_id_tracker_sem);
static CFS_LIST_HEAD(osp_id_tracker_list);
static void osp_sync_tracker_commit_cb(struct thandle *th, void *cookie)
if (txn == NULL || txn->oti_current_id < tr->otr_committed_id)
return;
- cfs_spin_lock(&tr->otr_lock);
+ spin_lock(&tr->otr_lock);
if (likely(txn->oti_current_id > tr->otr_committed_id)) {
CDEBUG(D_OTHER, "committed: %u -> %u\n",
tr->otr_committed_id, txn->oti_current_id);
cfs_waitq_signal(&d->opd_syn_waitq);
}
}
- cfs_spin_unlock(&tr->otr_lock);
+ spin_unlock(&tr->otr_lock);
}
static int osp_sync_id_traction_init(struct osp_device *d)
LASSERT(d->opd_syn_tracker == NULL);
CFS_INIT_LIST_HEAD(&d->opd_syn_ontrack);
- cfs_mutex_lock(&osp_id_tracker_sem);
+ mutex_lock(&osp_id_tracker_sem);
cfs_list_for_each_entry(tr, &osp_id_tracker_list, otr_list) {
if (tr->otr_dev == d->opd_storage) {
LASSERT(cfs_atomic_read(&tr->otr_refcount));
OBD_ALLOC_PTR(tr);
if (tr) {
d->opd_syn_tracker = tr;
- cfs_spin_lock_init(&tr->otr_lock);
+ spin_lock_init(&tr->otr_lock);
tr->otr_dev = d->opd_storage;
tr->otr_next_id = 1;
tr->otr_committed_id = 0;
rc = 0;
}
}
- cfs_mutex_unlock(&osp_id_tracker_sem);
+ mutex_unlock(&osp_id_tracker_sem);
return rc;
}
osp_sync_remove_from_tracker(d);
- cfs_mutex_lock(&osp_id_tracker_sem);
+ mutex_lock(&osp_id_tracker_sem);
if (cfs_atomic_dec_and_test(&tr->otr_refcount)) {
dt_txn_callback_del(d->opd_storage, &tr->otr_tx_cb);
LASSERT(cfs_list_empty(&tr->otr_wakeup_list));
OBD_FREE_PTR(tr);
d->opd_syn_tracker = NULL;
}
- cfs_mutex_unlock(&osp_id_tracker_sem);
+ mutex_unlock(&osp_id_tracker_sem);
EXIT;
}
LASSERT(tr);
/* XXX: we can improve this introducing per-cpu preallocated ids? */
- cfs_spin_lock(&tr->otr_lock);
+ spin_lock(&tr->otr_lock);
if (unlikely(tr->otr_next_id <= d->opd_syn_last_used_id)) {
- cfs_spin_unlock(&tr->otr_lock);
+ spin_unlock(&tr->otr_lock);
CERROR("%s: next %u, last synced %lu\n",
d->opd_obd->obd_name, tr->otr_next_id,
d->opd_syn_last_used_id);
d->opd_syn_last_used_id = id;
if (cfs_list_empty(&d->opd_syn_ontrack))
cfs_list_add(&d->opd_syn_ontrack, &tr->otr_wakeup_list);
- cfs_spin_unlock(&tr->otr_lock);
+ spin_unlock(&tr->otr_lock);
CDEBUG(D_OTHER, "new id %u\n", (unsigned) id);
return id;
if (cfs_list_empty(&d->opd_syn_ontrack))
return;
- cfs_spin_lock(&tr->otr_lock);
+ spin_lock(&tr->otr_lock);
cfs_list_del_init(&d->opd_syn_ontrack);
- cfs_spin_unlock(&tr->otr_lock);
+ spin_unlock(&tr->otr_lock);
}
RETURN(rc);
}
-#define ost_init_sec_none(reply, exp) \
-do { \
- reply->ocd_connect_flags &= ~(OBD_CONNECT_RMT_CLIENT | \
- OBD_CONNECT_RMT_CLIENT_FORCE | \
- OBD_CONNECT_OSS_CAPA); \
- cfs_spin_lock(&exp->exp_lock); \
- exp->exp_connect_flags = reply->ocd_connect_flags; \
- cfs_spin_unlock(&exp->exp_lock); \
+#define ost_init_sec_none(reply, exp) \
+do { \
+ reply->ocd_connect_flags &= ~(OBD_CONNECT_RMT_CLIENT | \
+ OBD_CONNECT_RMT_CLIENT_FORCE | \
+ OBD_CONNECT_OSS_CAPA); \
+ spin_lock(&exp->exp_lock); \
+ exp->exp_connect_flags = reply->ocd_connect_flags; \
+ spin_unlock(&exp->exp_lock); \
} while (0)
static int ost_init_sec_level(struct ptlrpc_request *req)
if (!filter->fo_fl_oss_capa)
reply->ocd_connect_flags &= ~OBD_CONNECT_OSS_CAPA;
- cfs_spin_lock(&exp->exp_lock);
- exp->exp_connect_flags = reply->ocd_connect_flags;
- cfs_spin_unlock(&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
+ exp->exp_connect_flags = reply->ocd_connect_flags;
+ spin_unlock(&exp->exp_lock);
}
break;
default:
}
if (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) {
- cfs_read_lock(&filter->fo_sptlrpc_lock);
- sptlrpc_target_choose_flavor(&filter->fo_sptlrpc_rset,
- req->rq_sp_from,
- req->rq_peer.nid,
- &flvr);
- cfs_read_unlock(&filter->fo_sptlrpc_lock);
+ read_lock(&filter->fo_sptlrpc_lock);
+ sptlrpc_target_choose_flavor(&filter->fo_sptlrpc_rset,
+ req->rq_sp_from,
+ req->rq_peer.nid,
+ &flvr);
+ read_unlock(&filter->fo_sptlrpc_lock);
- cfs_spin_lock(&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
exp->exp_sp_peer = req->rq_sp_from;
exp->exp_flvr = flvr;
rc = -EACCES;
}
- cfs_spin_unlock(&exp->exp_lock);
+ spin_unlock(&exp->exp_lock);
} else {
if (exp->exp_sp_peer != req->rq_sp_from) {
CERROR("RPC source %s doesn't match %s\n",
}
- cfs_spin_lock_bh(&exp->exp_bl_list_lock);
+ spin_lock_bh(&exp->exp_bl_list_lock);
cfs_list_for_each_entry(lock, &exp->exp_bl_list, l_exp_list) {
LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
ost_prolong_lock_one(data, lock);
}
- cfs_spin_unlock_bh(&exp->exp_bl_list_lock);
+ spin_unlock_bh(&exp->exp_bl_list_lock);
- EXIT;
+ EXIT;
}
/**
lprocfs_ost_init_vars(&lvars);
lprocfs_obd_setup(obd, lvars.obd_vars);
- cfs_mutex_init(&ost->ost_health_mutex);
+ mutex_init(&ost->ost_health_mutex);
svc_conf = (typeof(svc_conf)) {
.psc_name = LUSTRE_OSS_NAME,
/* there is no recovery for OST OBD, all recovery is controlled by
* obdfilter OBD */
LASSERT(obd->obd_recovering == 0);
- cfs_mutex_lock(&ost->ost_health_mutex);
+ mutex_lock(&ost->ost_health_mutex);
ptlrpc_unregister_service(ost->ost_service);
ptlrpc_unregister_service(ost->ost_create_service);
ptlrpc_unregister_service(ost->ost_io_service);
ost->ost_create_service = NULL;
ost->ost_io_service = NULL;
- cfs_mutex_unlock(&ost->ost_health_mutex);
+ mutex_unlock(&ost->ost_health_mutex);
lprocfs_obd_cleanup(obd);
struct ost_obd *ost = &obd->u.ost;
int rc = 0;
- cfs_mutex_lock(&ost->ost_health_mutex);
+ mutex_lock(&ost->ost_health_mutex);
rc |= ptlrpc_service_health_check(ost->ost_service);
rc |= ptlrpc_service_health_check(ost->ost_create_service);
rc |= ptlrpc_service_health_check(ost->ost_io_service);
- cfs_mutex_unlock(&ost->ost_health_mutex);
+ mutex_unlock(&ost->ost_health_mutex);
/*
* health_check to return 0 on healthy
if (!desc)
return NULL;
- cfs_spin_lock_init(&desc->bd_lock);
+ spin_lock_init(&desc->bd_lock);
cfs_waitq_init(&desc->bd_waitq);
desc->bd_max_iov = npages;
desc->bd_iov_count = 0;
ENTRY;
req->rq_early = 0;
- cfs_spin_unlock(&req->rq_lock);
+ spin_unlock(&req->rq_lock);
- rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
- if (rc) {
- cfs_spin_lock(&req->rq_lock);
+ rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
+ if (rc) {
+ spin_lock(&req->rq_lock);
RETURN(rc);
}
sptlrpc_cli_finish_early_reply(early_req);
- cfs_spin_lock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
if (rc == 0) {
/* Adjust the local timeout for this req */
*/
void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
{
- cfs_list_t *l, *tmp;
- struct ptlrpc_request *req;
+ cfs_list_t *l, *tmp;
+ struct ptlrpc_request *req;
- LASSERT(pool != NULL);
+ LASSERT(pool != NULL);
- cfs_spin_lock(&pool->prp_lock);
- cfs_list_for_each_safe(l, tmp, &pool->prp_req_list) {
- req = cfs_list_entry(l, struct ptlrpc_request, rq_list);
- cfs_list_del(&req->rq_list);
- LASSERT(req->rq_reqbuf);
- LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
- OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
- OBD_FREE(req, sizeof(*req));
- }
- cfs_spin_unlock(&pool->prp_lock);
- OBD_FREE(pool, sizeof(*pool));
+ spin_lock(&pool->prp_lock);
+ cfs_list_for_each_safe(l, tmp, &pool->prp_req_list) {
+ req = cfs_list_entry(l, struct ptlrpc_request, rq_list);
+ cfs_list_del(&req->rq_list);
+ LASSERT(req->rq_reqbuf);
+ LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
+ OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
+ OBD_FREE(req, sizeof(*req));
+ }
+ spin_unlock(&pool->prp_lock);
+ OBD_FREE(pool, sizeof(*pool));
}
EXPORT_SYMBOL(ptlrpc_free_rq_pool);
"Trying to change pool size with nonempty pool "
"from %d to %d bytes\n", pool->prp_rq_size, size);
- cfs_spin_lock(&pool->prp_lock);
- pool->prp_rq_size = size;
- for (i = 0; i < num_rq; i++) {
- struct ptlrpc_request *req;
- struct lustre_msg *msg;
+ spin_lock(&pool->prp_lock);
+ pool->prp_rq_size = size;
+ for (i = 0; i < num_rq; i++) {
+ struct ptlrpc_request *req;
+ struct lustre_msg *msg;
- cfs_spin_unlock(&pool->prp_lock);
+ spin_unlock(&pool->prp_lock);
OBD_ALLOC(req, sizeof(struct ptlrpc_request));
if (!req)
return;
req->rq_reqbuf = msg;
req->rq_reqbuf_len = size;
req->rq_pool = pool;
- cfs_spin_lock(&pool->prp_lock);
- cfs_list_add_tail(&req->rq_list, &pool->prp_req_list);
- }
- cfs_spin_unlock(&pool->prp_lock);
- return;
+ spin_lock(&pool->prp_lock);
+ cfs_list_add_tail(&req->rq_list, &pool->prp_req_list);
+ }
+ spin_unlock(&pool->prp_lock);
+ return;
}
EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);
/* Request next power of two for the allocation, because internally
kernel would do exactly this */
- cfs_spin_lock_init(&pool->prp_lock);
+ spin_lock_init(&pool->prp_lock);
CFS_INIT_LIST_HEAD(&pool->prp_req_list);
pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
pool->prp_populate = populate_pool;
if (!pool)
return NULL;
- cfs_spin_lock(&pool->prp_lock);
+ spin_lock(&pool->prp_lock);
- /* See if we have anything in a pool, and bail out if nothing,
- * in writeout path, where this matters, this is safe to do, because
- * nothing is lost in this case, and when some in-flight requests
- * complete, this code will be called again. */
- if (unlikely(cfs_list_empty(&pool->prp_req_list))) {
- cfs_spin_unlock(&pool->prp_lock);
- return NULL;
- }
+ /* See if we have anything in the pool and bail out if not.
+  * In the writeout path, where this matters, this is safe to do
+  * because nothing is lost in that case, and when some in-flight
+  * requests complete, this code will be called again. */
+ if (unlikely(cfs_list_empty(&pool->prp_req_list))) {
+ spin_unlock(&pool->prp_lock);
+ return NULL;
+ }
- request = cfs_list_entry(pool->prp_req_list.next, struct ptlrpc_request,
- rq_list);
- cfs_list_del_init(&request->rq_list);
- cfs_spin_unlock(&pool->prp_lock);
+ request = cfs_list_entry(pool->prp_req_list.next, struct ptlrpc_request,
+ rq_list);
+ cfs_list_del_init(&request->rq_list);
+ spin_unlock(&pool->prp_lock);
LASSERT(request->rq_reqbuf);
LASSERT(request->rq_pool);
*/
static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
{
- struct ptlrpc_request_pool *pool = request->rq_pool;
+ struct ptlrpc_request_pool *pool = request->rq_pool;
- cfs_spin_lock(&pool->prp_lock);
- LASSERT(cfs_list_empty(&request->rq_list));
- LASSERT(!request->rq_receiving_reply);
- cfs_list_add_tail(&request->rq_list, &pool->prp_req_list);
- cfs_spin_unlock(&pool->prp_lock);
+ spin_lock(&pool->prp_lock);
+ LASSERT(cfs_list_empty(&request->rq_list));
+ LASSERT(!request->rq_receiving_reply);
+ cfs_list_add_tail(&request->rq_list, &pool->prp_req_list);
+ spin_unlock(&pool->prp_lock);
}
static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
ptlrpc_at_set_req_timeout(request);
- cfs_spin_lock_init(&request->rq_lock);
+ spin_lock_init(&request->rq_lock);
CFS_INIT_LIST_HEAD(&request->rq_list);
CFS_INIT_LIST_HEAD(&request->rq_timed_list);
CFS_INIT_LIST_HEAD(&request->rq_replay_list);
cfs_waitq_init(&set->set_waitq);
cfs_atomic_set(&set->set_new_count, 0);
cfs_atomic_set(&set->set_remaining, 0);
- cfs_spin_lock_init(&set->set_new_req_lock);
+ spin_lock_init(&set->set_new_req_lock);
CFS_INIT_LIST_HEAD(&set->set_new_requests);
CFS_INIT_LIST_HEAD(&set->set_cblist);
set->set_max_inflight = UINT_MAX;
cfs_atomic_dec(&set->set_remaining);
}
- cfs_spin_lock(&req->rq_lock);
- req->rq_set = NULL;
- req->rq_invalid_rqset = 0;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_set = NULL;
+ req->rq_invalid_rqset = 0;
+ spin_unlock(&req->rq_lock);
ptlrpc_req_finished (req);
}
int count, i;
LASSERT(req->rq_set == NULL);
- LASSERT(cfs_test_bit(LIOD_STOP, &pc->pc_flags) == 0);
+ LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0);
- cfs_spin_lock(&set->set_new_req_lock);
- /*
- * The set takes over the caller's request reference.
- */
- req->rq_set = set;
- req->rq_queued_time = cfs_time_current();
- cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests);
- count = cfs_atomic_inc_return(&set->set_new_count);
- cfs_spin_unlock(&set->set_new_req_lock);
+ spin_lock(&set->set_new_req_lock);
+ /*
+ * The set takes over the caller's request reference.
+ */
+ req->rq_set = set;
+ req->rq_queued_time = cfs_time_current();
+ cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests);
+ count = cfs_atomic_inc_return(&set->set_new_count);
+ spin_unlock(&set->set_new_req_lock);
/* Only need to call wakeup once for the first entry. */
if (count == 1) {
}
if (imp->imp_replayable) {
- cfs_spin_lock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
/*
* No point in adding already-committed requests to the replay
* list, we will just remove them immediately. b=9829
ptlrpc_save_versions(req);
ptlrpc_retain_replayable_request(req, imp);
} else if (req->rq_commit_cb != NULL) {
- cfs_spin_unlock(&imp->imp_lock);
- req->rq_commit_cb(req);
- cfs_spin_lock(&imp->imp_lock);
+ spin_unlock(&imp->imp_lock);
+ req->rq_commit_cb(req);
+ spin_lock(&imp->imp_lock);
}
/*
if (req->rq_transno > imp->imp_peer_committed_transno)
ptlrpc_pinger_commit_expected(imp);
- cfs_spin_unlock(&imp->imp_lock);
- }
+ spin_unlock(&imp->imp_lock);
+ }
- RETURN(rc);
+ RETURN(rc);
}
/**
ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
- cfs_spin_lock(&imp->imp_lock);
-
- if (!req->rq_generation_set)
- req->rq_import_generation = imp->imp_generation;
-
- if (ptlrpc_import_delay_req(imp, req, &rc)) {
- cfs_spin_lock(&req->rq_lock);
- req->rq_waiting = 1;
- cfs_spin_unlock(&req->rq_lock);
-
- DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
- "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
- ptlrpc_import_state_name(req->rq_send_state),
- ptlrpc_import_state_name(imp->imp_state));
- LASSERT(cfs_list_empty(&req->rq_list));
- cfs_list_add_tail(&req->rq_list, &imp->imp_delayed_list);
- cfs_atomic_inc(&req->rq_import->imp_inflight);
- cfs_spin_unlock(&imp->imp_lock);
- RETURN(0);
- }
+ spin_lock(&imp->imp_lock);
+
+ if (!req->rq_generation_set)
+ req->rq_import_generation = imp->imp_generation;
+
+ if (ptlrpc_import_delay_req(imp, req, &rc)) {
+ spin_lock(&req->rq_lock);
+ req->rq_waiting = 1;
+ spin_unlock(&req->rq_lock);
+
+ DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
+ "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
+ ptlrpc_import_state_name(req->rq_send_state),
+ ptlrpc_import_state_name(imp->imp_state));
+ LASSERT(cfs_list_empty(&req->rq_list));
+ cfs_list_add_tail(&req->rq_list, &imp->imp_delayed_list);
+ cfs_atomic_inc(&req->rq_import->imp_inflight);
+ spin_unlock(&imp->imp_lock);
+ RETURN(0);
+ }
- if (rc != 0) {
- cfs_spin_unlock(&imp->imp_lock);
- req->rq_status = rc;
- ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
- RETURN(rc);
- }
+ if (rc != 0) {
+ spin_unlock(&imp->imp_lock);
+ req->rq_status = rc;
+ ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
+ RETURN(rc);
+ }
- LASSERT(cfs_list_empty(&req->rq_list));
- cfs_list_add_tail(&req->rq_list, &imp->imp_sending_list);
- cfs_atomic_inc(&req->rq_import->imp_inflight);
- cfs_spin_unlock(&imp->imp_lock);
+ LASSERT(cfs_list_empty(&req->rq_list));
+ cfs_list_add_tail(&req->rq_list, &imp->imp_sending_list);
+ cfs_atomic_inc(&req->rq_import->imp_inflight);
+ spin_unlock(&imp->imp_lock);
lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid());
}
if (req->rq_err) {
- cfs_spin_lock(&req->rq_lock);
- req->rq_replied = 0;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_replied = 0;
+ spin_unlock(&req->rq_lock);
if (req->rq_status == 0)
req->rq_status = -EIO;
ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
if (!ptlrpc_unregister_reply(req, 1))
continue;
- cfs_spin_lock(&imp->imp_lock);
- if (ptlrpc_import_delay_req(imp, req, &status)){
- /* put on delay list - only if we wait
- * recovery finished - before send */
- cfs_list_del_init(&req->rq_list);
- cfs_list_add_tail(&req->rq_list,
- &imp-> \
- imp_delayed_list);
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ if (ptlrpc_import_delay_req(imp, req, &status)){
+ /* put on delay list - only if we wait
+ * recovery finished - before send */
+ cfs_list_del_init(&req->rq_list);
+ cfs_list_add_tail(&req->rq_list,
+ &imp->
+ imp_delayed_list);
+ spin_unlock(&imp->imp_lock);
continue;
}
req->rq_status = status;
ptlrpc_rqphase_move(req,
RQ_PHASE_INTERPRET);
- cfs_spin_unlock(&imp->imp_lock);
- GOTO(interpret, req->rq_status);
- }
- if (ptlrpc_no_resend(req) && !req->rq_wait_ctx) {
- req->rq_status = -ENOTCONN;
- ptlrpc_rqphase_move(req,
- RQ_PHASE_INTERPRET);
- cfs_spin_unlock(&imp->imp_lock);
- GOTO(interpret, req->rq_status);
- }
-
- cfs_list_del_init(&req->rq_list);
- cfs_list_add_tail(&req->rq_list,
- &imp->imp_sending_list);
-
- cfs_spin_unlock(&imp->imp_lock);
-
- cfs_spin_lock(&req->rq_lock);
- req->rq_waiting = 0;
- cfs_spin_unlock(&req->rq_lock);
-
- if (req->rq_timedout || req->rq_resend) {
- /* This is re-sending anyways,
- * let's mark req as resend. */
- cfs_spin_lock(&req->rq_lock);
- req->rq_resend = 1;
- cfs_spin_unlock(&req->rq_lock);
+ spin_unlock(&imp->imp_lock);
+ GOTO(interpret, req->rq_status);
+ }
+ if (ptlrpc_no_resend(req) &&
+ !req->rq_wait_ctx) {
+ req->rq_status = -ENOTCONN;
+ ptlrpc_rqphase_move(req,
+ RQ_PHASE_INTERPRET);
+ spin_unlock(&imp->imp_lock);
+ GOTO(interpret, req->rq_status);
+ }
+
+ cfs_list_del_init(&req->rq_list);
+ cfs_list_add_tail(&req->rq_list,
+ &imp->imp_sending_list);
+
+ spin_unlock(&imp->imp_lock);
+
+ spin_lock(&req->rq_lock);
+ req->rq_waiting = 0;
+ spin_unlock(&req->rq_lock);
+
+ if (req->rq_timedout || req->rq_resend) {
+ /* This is re-sending anyways,
+ * let's mark req as resend. */
+ spin_lock(&req->rq_lock);
+ req->rq_resend = 1;
+ spin_unlock(&req->rq_lock);
if (req->rq_bulk) {
__u64 old_xid;
if (status) {
if (req->rq_err) {
req->rq_status = status;
- cfs_spin_lock(&req->rq_lock);
- req->rq_wait_ctx = 0;
- cfs_spin_unlock(&req->rq_lock);
- force_timer_recalc = 1;
- } else {
- cfs_spin_lock(&req->rq_lock);
- req->rq_wait_ctx = 1;
- cfs_spin_unlock(&req->rq_lock);
- }
-
- continue;
- } else {
- cfs_spin_lock(&req->rq_lock);
- req->rq_wait_ctx = 0;
- cfs_spin_unlock(&req->rq_lock);
- }
+ spin_lock(&req->rq_lock);
+ req->rq_wait_ctx = 0;
+ spin_unlock(&req->rq_lock);
+ force_timer_recalc = 1;
+ } else {
+ spin_lock(&req->rq_lock);
+ req->rq_wait_ctx = 1;
+ spin_unlock(&req->rq_lock);
+ }
+
+ continue;
+ } else {
+ spin_lock(&req->rq_lock);
+ req->rq_wait_ctx = 0;
+ spin_unlock(&req->rq_lock);
+ }
+
+ rc = ptl_send_rpc(req, 0);
+ if (rc) {
+ DEBUG_REQ(D_HA, req,
+ "send failed: rc = %d", rc);
+ force_timer_recalc = 1;
+ spin_lock(&req->rq_lock);
+ req->rq_net_err = 1;
+ spin_unlock(&req->rq_lock);
+ }
+ /* need to reset the timeout */
+ force_timer_recalc = 1;
+ }
- rc = ptl_send_rpc(req, 0);
- if (rc) {
- DEBUG_REQ(D_HA, req, "send failed (%d)",
- rc);
- force_timer_recalc = 1;
- cfs_spin_lock(&req->rq_lock);
- req->rq_net_err = 1;
- cfs_spin_unlock(&req->rq_lock);
- }
- /* need to reset the timeout */
- force_timer_recalc = 1;
- }
+ spin_lock(&req->rq_lock);
- cfs_spin_lock(&req->rq_lock);
+ if (ptlrpc_client_early(req)) {
+ ptlrpc_at_recv_early_reply(req);
+ spin_unlock(&req->rq_lock);
+ continue;
+ }
- if (ptlrpc_client_early(req)) {
- ptlrpc_at_recv_early_reply(req);
- cfs_spin_unlock(&req->rq_lock);
- continue;
- }
+ /* Still waiting for a reply? */
+ if (ptlrpc_client_recv(req)) {
+ spin_unlock(&req->rq_lock);
+ continue;
+ }
- /* Still waiting for a reply? */
- if (ptlrpc_client_recv(req)) {
- cfs_spin_unlock(&req->rq_lock);
- continue;
- }
+ /* Did we actually receive a reply? */
+ if (!ptlrpc_client_replied(req)) {
+ spin_unlock(&req->rq_lock);
+ continue;
+ }
- /* Did we actually receive a reply? */
- if (!ptlrpc_client_replied(req)) {
- cfs_spin_unlock(&req->rq_lock);
- continue;
- }
-
- cfs_spin_unlock(&req->rq_lock);
+ spin_unlock(&req->rq_lock);
/* unlink from net because we are going to
* swab in-place of reply buffer */
libcfs_nid2str(imp->imp_connection->c_peer.nid),
lustre_msg_get_opc(req->rq_reqmsg));
- cfs_spin_lock(&imp->imp_lock);
- /* Request already may be not on sending or delaying list. This
- * may happen in the case of marking it erroneous for the case
- * ptlrpc_import_delay_req(req, status) find it impossible to
- * allow sending this rpc and returns *status != 0. */
- if (!cfs_list_empty(&req->rq_list)) {
- cfs_list_del_init(&req->rq_list);
- cfs_atomic_dec(&imp->imp_inflight);
- }
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ /* The request may no longer be on the sending or delayed list.
+  * This can happen when it was marked erroneous because
+  * ptlrpc_import_delay_req(req, status) found it impossible to
+  * allow sending this RPC and returned *status != 0. */
+ if (!cfs_list_empty(&req->rq_list)) {
+ cfs_list_del_init(&req->rq_list);
+ cfs_atomic_dec(&imp->imp_inflight);
+ }
+ spin_unlock(&imp->imp_lock);
cfs_atomic_dec(&set->set_remaining);
cfs_waitq_broadcast(&imp->imp_recovery_waitq);
/* free the request that has just been completed
* in order not to pollute set->set_requests */
cfs_list_del_init(&req->rq_set_chain);
- cfs_spin_lock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
req->rq_set = NULL;
req->rq_invalid_rqset = 0;
- cfs_spin_unlock(&req->rq_lock);
+ spin_unlock(&req->rq_lock);
/* record rq_status to compute the final status later */
if (req->rq_status != 0)
*/
int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
{
- struct obd_import *imp = req->rq_import;
- int rc = 0;
- ENTRY;
+ struct obd_import *imp = req->rq_import;
+ int rc = 0;
+ ENTRY;
- cfs_spin_lock(&req->rq_lock);
- req->rq_timedout = 1;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_timedout = 1;
+ spin_unlock(&req->rq_lock);
DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent "CFS_DURATION_T
"/real "CFS_DURATION_T"]",
DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
ptlrpc_import_state_name(req->rq_send_state),
ptlrpc_import_state_name(imp->imp_state));
- cfs_spin_lock(&req->rq_lock);
- req->rq_status = -ETIMEDOUT;
- req->rq_err = 1;
- cfs_spin_unlock(&req->rq_lock);
- RETURN(1);
+ spin_lock(&req->rq_lock);
+ req->rq_status = -ETIMEDOUT;
+ req->rq_err = 1;
+ spin_unlock(&req->rq_lock);
+ RETURN(1);
}
/* if a request can't be resent we can't wait for an answer after
*/
void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
{
- cfs_spin_lock(&req->rq_lock);
- req->rq_intr = 1;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_intr = 1;
+ spin_unlock(&req->rq_lock);
}
EXPORT_SYMBOL(ptlrpc_mark_interrupted);
cfs_list_for_each(tmp, &set->set_requests) {
req = cfs_list_entry(tmp, struct ptlrpc_request,
rq_set_chain);
- cfs_spin_lock(&req->rq_lock);
- req->rq_invalid_rqset = 1;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_invalid_rqset = 1;
+ spin_unlock(&req->rq_lock);
}
}
} while (rc != 0 || cfs_atomic_read(&set->set_remaining) != 0);
/* We must take it off the imp_replay_list first. Otherwise, we'll set
* request->rq_reqmsg to NULL while osc_close is dereferencing it. */
if (request->rq_import != NULL) {
- if (!locked)
- cfs_spin_lock(&request->rq_import->imp_lock);
- cfs_list_del_init(&request->rq_replay_list);
- if (!locked)
- cfs_spin_unlock(&request->rq_import->imp_lock);
+ if (!locked)
+ spin_lock(&request->rq_import->imp_lock);
+ cfs_list_del_init(&request->rq_replay_list);
+ if (!locked)
+ spin_unlock(&request->rq_import->imp_lock);
}
LASSERTF(cfs_list_empty(&request->rq_replay_list), "req %p\n", request);
DEBUG_REQ(D_INFO, req, "commit (last_committed "LPU64")",
imp->imp_peer_committed_transno);
free_req:
- cfs_spin_lock(&req->rq_lock);
- req->rq_replay = 0;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_replay = 0;
+ spin_unlock(&req->rq_lock);
if (req->rq_commit_cb != NULL)
req->rq_commit_cb(req);
cfs_list_del_init(&req->rq_replay_list);
lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 });
req->rq_status = -EAGAIN;
- cfs_spin_lock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
req->rq_resend = 1;
req->rq_net_err = 0;
req->rq_timedout = 0;
old_xid, req->rq_xid);
}
ptlrpc_client_wake_req(req);
- cfs_spin_unlock(&req->rq_lock);
+ spin_unlock(&req->rq_lock);
}
EXPORT_SYMBOL(ptlrpc_resend_req);
/* XXX: this function and rq_status are currently unused */
void ptlrpc_restart_req(struct ptlrpc_request *req)
{
- DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
- req->rq_status = -ERESTARTSYS;
+ DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
+ req->rq_status = -ERESTARTSYS;
- cfs_spin_lock(&req->rq_lock);
- req->rq_restart = 1;
- req->rq_timedout = 0;
- ptlrpc_client_wake_req(req);
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_restart = 1;
+ req->rq_timedout = 0;
+ ptlrpc_client_wake_req(req);
+ spin_unlock(&req->rq_lock);
}
EXPORT_SYMBOL(ptlrpc_restart_req);
/** VBR: check version failure */
if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
- /** replay was failed due to version mismatch */
- DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n");
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_vbr_failed = 1;
- imp->imp_no_lock_replay = 1;
- cfs_spin_unlock(&imp->imp_lock);
- lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
+ /** replay failed due to version mismatch */
+ DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n");
+ spin_lock(&imp->imp_lock);
+ imp->imp_vbr_failed = 1;
+ imp->imp_no_lock_replay = 1;
+ spin_unlock(&imp->imp_lock);
+ lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
} else {
/** The transno had better not change over replay. */
LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
lustre_msg_get_transno(req->rq_repmsg));
}
- cfs_spin_lock(&imp->imp_lock);
- /** if replays by version then gap was occur on server, no trust to locks */
- if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY)
- imp->imp_no_lock_replay = 1;
- imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ /** replay by version means a gap on the server, don't trust locks */
+ if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY)
+ imp->imp_no_lock_replay = 1;
+ imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
+ spin_unlock(&imp->imp_lock);
LASSERT(imp->imp_last_replay_transno);
/* transaction number shouldn't be bigger than the latest replayed */
* ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
* this flag and then putting requests on sending_list or delayed_list.
*/
- cfs_spin_lock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
/* XXX locking? Maybe we should remove each request with the list
* locked? Also, how do we know if the requests on the list are
DEBUG_REQ(D_RPCTRACE, req, "inflight");
- cfs_spin_lock (&req->rq_lock);
- if (req->rq_import_generation < imp->imp_generation) {
- req->rq_err = 1;
+ spin_lock(&req->rq_lock);
+ if (req->rq_import_generation < imp->imp_generation) {
+ req->rq_err = 1;
req->rq_status = -EIO;
- ptlrpc_client_wake_req(req);
- }
- cfs_spin_unlock (&req->rq_lock);
- }
+ ptlrpc_client_wake_req(req);
+ }
+ spin_unlock(&req->rq_lock);
+ }
- cfs_list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
- struct ptlrpc_request *req =
- cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
+ cfs_list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
+ struct ptlrpc_request *req =
+ cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
- DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
+ DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
- cfs_spin_lock (&req->rq_lock);
- if (req->rq_import_generation < imp->imp_generation) {
- req->rq_err = 1;
+ spin_lock(&req->rq_lock);
+ if (req->rq_import_generation < imp->imp_generation) {
+ req->rq_err = 1;
req->rq_status = -EIO;
- ptlrpc_client_wake_req(req);
- }
- cfs_spin_unlock (&req->rq_lock);
- }
+ ptlrpc_client_wake_req(req);
+ }
+ spin_unlock(&req->rq_lock);
+ }
- /* Last chance to free reqs left on the replay list, but we
- * will still leak reqs that haven't committed. */
- if (imp->imp_replayable)
- ptlrpc_free_committed(imp);
+ /* Last chance to free reqs left on the replay list, but we
+ * will still leak reqs that haven't committed. */
+ if (imp->imp_replayable)
+ ptlrpc_free_committed(imp);
- cfs_spin_unlock(&imp->imp_lock);
+ spin_unlock(&imp->imp_lock);
- EXIT;
+ EXIT;
}
EXPORT_SYMBOL(ptlrpc_abort_inflight);
cfs_list_entry(pos, struct ptlrpc_request,
rq_set_chain);
- cfs_spin_lock(&req->rq_lock);
- if (req->rq_phase != RQ_PHASE_RPC) {
- cfs_spin_unlock(&req->rq_lock);
- continue;
- }
+ spin_lock(&req->rq_lock);
+ if (req->rq_phase != RQ_PHASE_RPC) {
+ spin_unlock(&req->rq_lock);
+ continue;
+ }
- req->rq_err = 1;
- req->rq_status = -EINTR;
- ptlrpc_client_wake_req(req);
- cfs_spin_unlock(&req->rq_lock);
- }
+ req->rq_err = 1;
+ req->rq_status = -EINTR;
+ ptlrpc_client_wake_req(req);
+ spin_unlock(&req->rq_lock);
+ }
}
static __u64 ptlrpc_last_xid;
-static cfs_spinlock_t ptlrpc_last_xid_lock;
+static spinlock_t ptlrpc_last_xid_lock;
/**
* Initialize the XID for the node. This is common among all requests on
{
time_t now = cfs_time_current_sec();
- cfs_spin_lock_init(&ptlrpc_last_xid_lock);
+ spin_lock_init(&ptlrpc_last_xid_lock);
if (now < YEAR_2004) {
cfs_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
ptlrpc_last_xid >>= 2;
*/
__u64 ptlrpc_next_xid(void)
{
- __u64 tmp;
- cfs_spin_lock(&ptlrpc_last_xid_lock);
- tmp = ++ptlrpc_last_xid;
- cfs_spin_unlock(&ptlrpc_last_xid_lock);
- return tmp;
+ __u64 tmp;
+ spin_lock(&ptlrpc_last_xid_lock);
+ tmp = ++ptlrpc_last_xid;
+ spin_unlock(&ptlrpc_last_xid_lock);
+ return tmp;
}
EXPORT_SYMBOL(ptlrpc_next_xid);
__u64 ptlrpc_sample_next_xid(void)
{
#if BITS_PER_LONG == 32
- /* need to avoid possible word tearing on 32-bit systems */
- __u64 tmp;
- cfs_spin_lock(&ptlrpc_last_xid_lock);
- tmp = ptlrpc_last_xid + 1;
- cfs_spin_unlock(&ptlrpc_last_xid_lock);
- return tmp;
+ /* need to avoid possible word tearing on 32-bit systems */
+ __u64 tmp;
+ spin_lock(&ptlrpc_last_xid_lock);
+ tmp = ptlrpc_last_xid + 1;
+ spin_unlock(&ptlrpc_last_xid_lock);
+ return tmp;
#else
+ /* No need to lock, since the returned value is racy anyway */
- return ptlrpc_last_xid + 1;
+ /* No need to lock, since returned value is racy anyways */
+ return ptlrpc_last_xid + 1;
#endif
}
EXPORT_SYMBOL(ptlrpc_sample_next_xid);
req->rq_must_unlink = 0;
req->rq_no_delay = req->rq_no_resend = 1;
- cfs_spin_lock_init(&req->rq_lock);
+ spin_lock_init(&req->rq_lock);
CFS_INIT_LIST_HEAD(&req->rq_list);
CFS_INIT_LIST_HEAD(&req->rq_replay_list);
CFS_INIT_LIST_HEAD(&req->rq_set_chain);
/* Failed send: make it seem like the reply timed out, just
* like failing sends in client.c does currently... */
- cfs_spin_lock(&req->rq_lock);
- req->rq_net_err = 1;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_net_err = 1;
+ spin_unlock(&req->rq_lock);
ptlrpc_client_wake_req(req);
}
for adaptive timeouts' early reply. */
LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);
- cfs_spin_lock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
req->rq_receiving_reply = 0;
req->rq_early = 0;
/* NB don't unlock till after wakeup; req can disappear under us
* since we don't have our own ref */
ptlrpc_client_wake_req(req);
- cfs_spin_unlock(&req->rq_lock);
- EXIT;
+ spin_unlock(&req->rq_lock);
+ EXIT;
}
/*
"event type %d, status %d, desc %p\n",
ev->type, ev->status, desc);
- cfs_spin_lock(&desc->bd_lock);
+ spin_lock(&desc->bd_lock);
req = desc->bd_req;
LASSERT(desc->bd_network_rw);
desc->bd_network_rw = 0;
desc->bd_sender = ev->sender;
} else {
/* start reconnect and resend if network error hit */
- cfs_spin_lock(&req->rq_lock);
- req->rq_net_err = 1;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_net_err = 1;
+ spin_unlock(&req->rq_lock);
}
/* release the encrypted pages for write */
* otherwise */
ptlrpc_client_wake_req(req);
- cfs_spin_unlock(&desc->bd_lock);
- EXIT;
+ spin_unlock(&desc->bd_lock);
+ EXIT;
}
/*
req->rq_self = ev->target.nid;
req->rq_rqbd = rqbd;
req->rq_phase = RQ_PHASE_NEW;
- cfs_spin_lock_init(&req->rq_lock);
+ spin_lock_init(&req->rq_lock);
CFS_INIT_LIST_HEAD(&req->rq_timed_list);
CFS_INIT_LIST_HEAD(&req->rq_exp_list);
cfs_atomic_set(&req->rq_refcount, 1);
CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));
- cfs_spin_lock(&svcpt->scp_lock);
+ spin_lock(&svcpt->scp_lock);
ptlrpc_req_add_history(svcpt, req);
* has been queued and we unlock, so do the wake now... */
cfs_waitq_signal(&svcpt->scp_waitq);
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
EXIT;
}
if (ev->unlinked) {
/* Last network callback. The net's ref on 'rs' stays put
* until ptlrpc_handle_rs() is done with it */
- cfs_spin_lock(&svcpt->scp_rep_lock);
- cfs_spin_lock(&rs->rs_lock);
+ spin_lock(&svcpt->scp_rep_lock);
+ spin_lock(&rs->rs_lock);
rs->rs_on_net = 0;
if (!rs->rs_no_ack ||
rs->rs_export->exp_obd->obd_last_committed)
ptlrpc_schedule_difficult_reply(rs);
- cfs_spin_unlock(&rs->rs_lock);
- cfs_spin_unlock(&svcpt->scp_rep_lock);
+ spin_unlock(&rs->rs_lock);
+ spin_unlock(&svcpt->scp_rep_lock);
}
EXIT;
}
"event type %d, status %d, desc %p\n",
ev->type, ev->status, desc);
- cfs_spin_lock(&desc->bd_lock);
+ spin_lock(&desc->bd_lock);
if ((ev->type == LNET_EVENT_ACK ||
ev->type == LNET_EVENT_REPLY) &&
cfs_waitq_signal(&desc->bd_waitq);
}
- cfs_spin_unlock(&desc->bd_lock);
- EXIT;
+ spin_unlock(&desc->bd_lock);
+ EXIT;
}
#endif
liblustre_register_wait_callback("liblustre_check_services",
&liblustre_check_services,
NULL);
- cfs_init_completion_module(liblustre_wait_event);
+ init_completion_module(liblustre_wait_event);
#endif
rc = ptlrpcd_addref();
if (rc == 0)
RETURN(-EINVAL);
}
- cfs_spin_lock(&obd->obd_dev_lock);
- if (obd->obd_stopping) {
- CERROR("obd %s has stopped\n", obdname);
- cfs_spin_unlock(&obd->obd_dev_lock);
- RETURN(-EINVAL);
- }
-
- if (strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
- strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME) &&
- strcmp(obd->obd_type->typ_name, LUSTRE_MGC_NAME)) {
- CERROR("obd %s is not a client device\n", obdname);
- cfs_spin_unlock(&obd->obd_dev_lock);
- RETURN(-EINVAL);
- }
- cfs_spin_unlock(&obd->obd_dev_lock);
-
- cfs_down_read(&obd->u.cli.cl_sem);
- if (obd->u.cli.cl_import == NULL) {
- CERROR("obd %s: import has gone\n", obd->obd_name);
- cfs_up_read(&obd->u.cli.cl_sem);
- RETURN(-EINVAL);
- }
- imp = class_import_get(obd->u.cli.cl_import);
- cfs_up_read(&obd->u.cli.cl_sem);
+ spin_lock(&obd->obd_dev_lock);
+ if (obd->obd_stopping) {
+ CERROR("obd %s has stopped\n", obdname);
+ spin_unlock(&obd->obd_dev_lock);
+ RETURN(-EINVAL);
+ }
+
+ if (strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
+ strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME) &&
+ strcmp(obd->obd_type->typ_name, LUSTRE_MGC_NAME)) {
+ CERROR("obd %s is not a client device\n", obdname);
+ spin_unlock(&obd->obd_dev_lock);
+ RETURN(-EINVAL);
+ }
+ spin_unlock(&obd->obd_dev_lock);
+
+ down_read(&obd->u.cli.cl_sem);
+ if (obd->u.cli.cl_import == NULL) {
+ CERROR("obd %s: import has gone\n", obd->obd_name);
+ up_read(&obd->u.cli.cl_sem);
+ RETURN(-EINVAL);
+ }
+ imp = class_import_get(obd->u.cli.cl_import);
+ up_read(&obd->u.cli.cl_sem);
if (imp->imp_deactive) {
CERROR("import has been deactivated\n");
GSS_SEQ_WIN_MAIN / 4)
struct gss_svc_seq_data {
- cfs_spinlock_t ssd_lock;
+ spinlock_t ssd_lock;
/*
* highest sequence number seen so far, for main and back window
*/
};
struct gss_sec {
- struct ptlrpc_sec gs_base;
- struct gss_api_mech *gs_mech;
- cfs_spinlock_t gs_lock;
- __u64 gs_rvs_hdl;
+ struct ptlrpc_sec gs_base;
+ struct gss_api_mech *gs_mech;
+ spinlock_t gs_lock;
+ __u64 gs_rvs_hdl;
};
struct gss_sec_pipefs {
/*
* specially serialize upcalls for root context.
*/
- cfs_mutex_t gsk_root_uc_lock;
+ struct mutex gsk_root_uc_lock;
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
- cfs_mutex_t gsk_uc_lock; /* serialize upcalls */
+ struct mutex gsk_uc_lock; /* serialize upcalls */
#endif
};
static inline void keyring_upcall_lock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
- cfs_mutex_lock(&gsec_kr->gsk_uc_lock);
+ mutex_lock(&gsec_kr->gsk_uc_lock);
#endif
}
static inline void keyring_upcall_unlock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
- cfs_mutex_unlock(&gsec_kr->gsk_uc_lock);
+ mutex_unlock(&gsec_kr->gsk_uc_lock);
#endif
}
}
ctx->cc_expire = cfs_time_current_sec() + KEYRING_UPCALL_TIMEOUT;
- cfs_clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
+ clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
cfs_atomic_inc(&ctx->cc_refcount); /* for the caller */
return ctx;
LASSERT(sec);
LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
- LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
LASSERT(gctx_kr->gck_key == NULL);
ctx_clear_timer_kr(ctx);
* - lock ctx -> unlist -> unlock ctx -> lock key -> unbind -> unlock key
*/
-static inline void spin_lock_if(cfs_spinlock_t *lock, int condition)
+static inline void spin_lock_if(spinlock_t *lock, int condition)
{
- if (condition)
- cfs_spin_lock(lock);
+ if (condition)
+ spin_lock(lock);
}
-static inline void spin_unlock_if(cfs_spinlock_t *lock, int condition)
+static inline void spin_unlock_if(spinlock_t *lock, int condition)
{
- if (condition)
- cfs_spin_unlock(lock);
+ if (condition)
+ spin_unlock(lock);
}
static void ctx_enlist_kr(struct ptlrpc_cli_ctx *ctx, int is_root, int locked)
{
- struct ptlrpc_sec *sec = ctx->cc_sec;
- struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+ struct ptlrpc_sec *sec = ctx->cc_sec;
+ struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- LASSERT(!cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(!test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
- spin_lock_if(&sec->ps_lock, !locked);
+ spin_lock_if(&sec->ps_lock, !locked);
- cfs_atomic_inc(&ctx->cc_refcount);
- cfs_set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
- cfs_hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
- if (is_root)
- gsec_kr->gsk_root_ctx = ctx;
+ cfs_atomic_inc(&ctx->cc_refcount);
+ set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+ cfs_hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
+ if (is_root)
+ gsec_kr->gsk_root_ctx = ctx;
- spin_unlock_if(&sec->ps_lock, !locked);
+ spin_unlock_if(&sec->ps_lock, !locked);
}
/*
struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
/* if hashed bit has gone, leave the job to somebody who is doing it */
- if (cfs_test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
+ if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
return 0;
/* drop ref inside spin lock to prevent race with other operations */
static void unbind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
LASSERT(key->payload.data == ctx);
- LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
/* must revoke the key, or others may treat it as newly created */
key_revoke_locked(key);
static
struct ptlrpc_cli_ctx * sec_lookup_root_ctx_kr(struct ptlrpc_sec *sec)
{
- struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- struct ptlrpc_cli_ctx *ctx = NULL;
+ struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+ struct ptlrpc_cli_ctx *ctx = NULL;
- cfs_spin_lock(&sec->ps_lock);
+ spin_lock(&sec->ps_lock);
ctx = gsec_kr->gsk_root_ctx;
cfs_atomic_inc(&ctx->cc_refcount);
}
- cfs_spin_unlock(&sec->ps_lock);
+ spin_unlock(&sec->ps_lock);
- return ctx;
+ return ctx;
}
#define RVS_CTX_EXPIRE_NICE (10)
LASSERT(sec_is_reverse(sec));
- cfs_spin_lock(&sec->ps_lock);
+ spin_lock(&sec->ps_lock);
now = cfs_time_current_sec();
if (key)
bind_key_ctx(key, new_ctx);
- cfs_spin_unlock(&sec->ps_lock);
+ spin_unlock(&sec->ps_lock);
}
static void construct_key_desc(void *buf, int bufsize,
CFS_INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
gsec_kr->gsk_root_ctx = NULL;
- cfs_mutex_init(&gsec_kr->gsk_root_uc_lock);
+ mutex_init(&gsec_kr->gsk_root_uc_lock);
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
- cfs_mutex_init(&gsec_kr->gsk_uc_lock);
+ mutex_init(&gsec_kr->gsk_uc_lock);
#endif
if (gss_sec_create_common(&gsec_kr->gsk_base, &gss_policy_keyring,
* the root upcall lock, make sure nobody else populated new root
* context after last check. */
if (is_root) {
- cfs_mutex_lock(&gsec_kr->gsk_root_uc_lock);
+ mutex_lock(&gsec_kr->gsk_root_uc_lock);
ctx = sec_lookup_root_ctx_kr(sec);
if (ctx)
key_put(key);
out:
if (is_root)
- cfs_mutex_unlock(&gsec_kr->gsk_root_uc_lock);
+ mutex_unlock(&gsec_kr->gsk_root_uc_lock);
RETURN(ctx);
}
gsec_kr = sec2gsec_keyring(sec);
- cfs_spin_lock(&sec->ps_lock);
+ spin_lock(&sec->ps_lock);
cfs_hlist_for_each_entry_safe(ctx, pos, next,
&gsec_kr->gsk_clist, cc_cache) {
LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
cfs_atomic_read(&ctx->cc_refcount) - 2);
}
- cfs_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
- if (!grace)
- cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
+ if (!grace)
+ clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
cfs_atomic_inc(&ctx->cc_refcount);
cfs_atomic_dec(&ctx->cc_refcount);
}
}
- cfs_spin_unlock(&sec->ps_lock);
+ spin_unlock(&sec->ps_lock);
- dispose_ctx_list_kr(&freelist);
- EXIT;
+ dispose_ctx_list_kr(&freelist);
+ EXIT;
}
static
CWARN("running gc\n");
- cfs_spin_lock(&sec->ps_lock);
+ spin_lock(&sec->ps_lock);
cfs_hlist_for_each_entry_safe(ctx, pos, next,
&gsec_kr->gsk_clist, cc_cache) {
LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
cfs_atomic_dec(&ctx->cc_refcount);
}
}
- cfs_spin_unlock(&sec->ps_lock);
+ spin_unlock(&sec->ps_lock);
- dispose_ctx_list_kr(&freelist);
- EXIT;
- return;
+ dispose_ctx_list_kr(&freelist);
+ EXIT;
+ return;
}
static
time_t now = cfs_time_current_sec();
ENTRY;
- cfs_spin_lock(&sec->ps_lock);
+ spin_lock(&sec->ps_lock);
cfs_hlist_for_each_entry_safe(ctx, pos, next,
&gsec_kr->gsk_clist, cc_cache) {
struct key *key;
gss_handle_to_u64(&gctx->gc_svc_handle),
mech);
}
- cfs_spin_unlock(&sec->ps_lock);
+ spin_unlock(&sec->ps_lock);
- RETURN(0);
+ RETURN(0);
}
/****************************************
*/
LASSERT(cfs_current()->signal->session_keyring);
- cfs_lockdep_off();
+ lockdep_off();
rc = key_link(cfs_current()->signal->session_keyring, key);
- cfs_lockdep_on();
+ lockdep_on();
if (unlikely(rc)) {
CERROR("failed to link key %08x to keyring %08x: %d\n",
key->serial,
cli_ctx_expire(ctx);
if (rc != -ERESTART)
- cfs_set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+ set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
}
/* let user space think it's a success */
#include "gss_asn1.h"
#include "gss_krb5.h"
-static cfs_spinlock_t krb5_seq_lock;
+static spinlock_t krb5_seq_lock;
struct krb5_enctype {
char *ke_dispname;
}
khdr->kh_filler = 0xff;
- cfs_spin_lock(&krb5_seq_lock);
- khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
- cfs_spin_unlock(&krb5_seq_lock);
+ spin_lock(&krb5_seq_lock);
+ khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
+ spin_unlock(&krb5_seq_lock);
}
static __u32 verify_krb5_header(struct krb5_ctx *kctx,
int __init init_kerberos_module(void)
{
- int status;
+ int status;
- cfs_spin_lock_init(&krb5_seq_lock);
+ spin_lock_init(&krb5_seq_lock);
- status = lgss_mech_register(&gss_kerberos_mech);
- if (status)
- CERROR("Failed to register kerberos gss mechanism!\n");
- return status;
+ status = lgss_mech_register(&gss_kerberos_mech);
+ if (status)
+ CERROR("Failed to register kerberos gss mechanism!\n");
+ return status;
}
void __exit cleanup_kerberos_module(void)
int lgss_mech_register(struct gss_api_mech *gm)
{
- cfs_spin_lock(&registered_mechs_lock);
- cfs_list_add(&gm->gm_list, &registered_mechs);
- cfs_spin_unlock(&registered_mechs_lock);
- CWARN("Register %s mechanism\n", gm->gm_name);
- return 0;
+ spin_lock(&registered_mechs_lock);
+ cfs_list_add(&gm->gm_list, &registered_mechs);
+ spin_unlock(&registered_mechs_lock);
+ CWARN("Register %s mechanism\n", gm->gm_name);
+ return 0;
}
void lgss_mech_unregister(struct gss_api_mech *gm)
{
- cfs_spin_lock(&registered_mechs_lock);
- cfs_list_del(&gm->gm_list);
- cfs_spin_unlock(&registered_mechs_lock);
- CWARN("Unregister %s mechanism\n", gm->gm_name);
+ spin_lock(&registered_mechs_lock);
+ cfs_list_del(&gm->gm_list);
+ spin_unlock(&registered_mechs_lock);
+ CWARN("Unregister %s mechanism\n", gm->gm_name);
}
struct gss_api_mech *lgss_name_to_mech(char *name)
{
- struct gss_api_mech *pos, *gm = NULL;
-
- cfs_spin_lock(&registered_mechs_lock);
- cfs_list_for_each_entry(pos, &registered_mechs, gm_list) {
- if (0 == strcmp(name, pos->gm_name)) {
- if (!cfs_try_module_get(pos->gm_owner))
- continue;
- gm = pos;
- break;
- }
- }
- cfs_spin_unlock(&registered_mechs_lock);
- return gm;
+ struct gss_api_mech *pos, *gm = NULL;
+
+ spin_lock(&registered_mechs_lock);
+ cfs_list_for_each_entry(pos, &registered_mechs, gm_list) {
+ if (0 == strcmp(name, pos->gm_name)) {
+ if (!cfs_try_module_get(pos->gm_owner))
+ continue;
+ gm = pos;
+ break;
+ }
+ }
+ spin_unlock(&registered_mechs_lock);
+ return gm;
}
struct gss_api_mech *lgss_subflavor_to_mech(__u32 subflavor)
{
- struct gss_api_mech *pos, *gm = NULL;
+ struct gss_api_mech *pos, *gm = NULL;
- cfs_spin_lock(&registered_mechs_lock);
+ spin_lock(&registered_mechs_lock);
cfs_list_for_each_entry(pos, &registered_mechs, gm_list) {
if (!cfs_try_module_get(pos->gm_owner))
continue;
gm = pos;
break;
}
- cfs_spin_unlock(&registered_mechs_lock);
- return gm;
+ spin_unlock(&registered_mechs_lock);
+ return gm;
}
void lgss_mech_put(struct gss_api_mech *gm)
static
void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *hash)
{
- cfs_set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+ set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
cfs_atomic_inc(&ctx->cc_refcount);
cfs_hlist_add_head(&ctx->cc_cache, hash);
}
{
LASSERT_SPIN_LOCKED(&ctx->cc_sec->ps_lock);
LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
- LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
- cfs_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+ clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
if (cfs_atomic_dec_and_test(&ctx->cc_refcount)) {
__cfs_hlist_del(&ctx->cc_cache);
{
LASSERT(ctx->cc_sec);
LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
- LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
return ctx_check_death_pf(ctx, freelist);
}
cc_cache);
LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
- LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT,
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT,
&ctx->cc_flags) == 0);
cfs_hlist_del_init(&ctx->cc_cache);
static
void gss_cli_ctx_die_pf(struct ptlrpc_cli_ctx *ctx, int grace)
{
- LASSERT(ctx->cc_sec);
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(ctx->cc_sec);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
- cli_ctx_expire(ctx);
+ cli_ctx_expire(ctx);
- cfs_spin_lock(&ctx->cc_sec->ps_lock);
+ spin_lock(&ctx->cc_sec->ps_lock);
- if (cfs_test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
- LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 1);
+ if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
+ LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 1);
- cfs_hlist_del_init(&ctx->cc_cache);
- if (cfs_atomic_dec_and_test(&ctx->cc_refcount))
- LBUG();
- }
+ cfs_hlist_del_init(&ctx->cc_cache);
+ if (cfs_atomic_dec_and_test(&ctx->cc_refcount))
+ LBUG();
+ }
- cfs_spin_unlock(&ctx->cc_sec->ps_lock);
+ spin_unlock(&ctx->cc_sec->ps_lock);
}
/****************************************
(__u64) new->cc_vcred.vc_uid);
LASSERT(hash < gsec_pf->gsp_chash_size);
- cfs_spin_lock(&gsec->gs_base.ps_lock);
+ spin_lock(&gsec->gs_base.ps_lock);
cfs_hlist_for_each_entry_safe(ctx, pos, next,
&gsec_pf->gsp_chash[hash], cc_cache) {
ctx_enhash_pf(new, &gsec_pf->gsp_chash[hash]);
- cfs_spin_unlock(&gsec->gs_base.ps_lock);
+ spin_unlock(&gsec->gs_base.ps_lock);
ctx_list_destroy_pf(&freelist);
EXIT;
LASSERT(hash < gsec_pf->gsp_chash_size);
retry:
- cfs_spin_lock(&sec->ps_lock);
+ spin_lock(&sec->ps_lock);
/* gc_next == 0 means never do gc */
if (remove_dead && sec->ps_gc_next &&
} else {
/* don't allocate for reverse sec */
if (sec_is_reverse(sec)) {
- cfs_spin_unlock(&sec->ps_lock);
- RETURN(NULL);
- }
-
- if (new) {
- ctx_enhash_pf(new, hash_head);
- ctx = new;
- } else if (create) {
- cfs_spin_unlock(&sec->ps_lock);
- new = ctx_create_pf(sec, vcred);
- if (new) {
- cfs_clear_bit(PTLRPC_CTX_NEW_BIT,
- &new->cc_flags);
- goto retry;
- }
- } else
- ctx = NULL;
- }
-
- /* hold a ref */
- if (ctx)
- cfs_atomic_inc(&ctx->cc_refcount);
-
- cfs_spin_unlock(&sec->ps_lock);
+ spin_unlock(&sec->ps_lock);
+ RETURN(NULL);
+ }
+
+ if (new) {
+ ctx_enhash_pf(new, hash_head);
+ ctx = new;
+ } else if (create) {
+ spin_unlock(&sec->ps_lock);
+ new = ctx_create_pf(sec, vcred);
+ if (new) {
+ clear_bit(PTLRPC_CTX_NEW_BIT, &new->cc_flags);
+ goto retry;
+ }
+ } else {
+ ctx = NULL;
+ }
+ }
+
+ /* hold a ref */
+ if (ctx)
+ cfs_atomic_inc(&ctx->cc_refcount);
+
+ spin_unlock(&sec->ps_lock);
/* the allocator of the context must give the first push to refresh */
if (new) {
struct ptlrpc_cli_ctx *ctx,
int sync)
{
- LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
LASSERT(cfs_hlist_unhashed(&ctx->cc_cache));
/* if required async, we must clear the UPTODATE bit to prevent extra
* rpcs during destroy procedure. */
if (!sync)
- cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
/* destroy this context */
ctx_destroy_pf(sec, ctx);
gsec = container_of(sec, struct gss_sec, gs_base);
gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
- cfs_spin_lock(&sec->ps_lock);
+ spin_lock(&sec->ps_lock);
for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
cfs_hlist_for_each_entry_safe(ctx, pos, next,
&gsec_pf->gsp_chash[i],
}
ctx_unhash_pf(ctx, &freelist);
- cfs_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
- if (!grace)
- cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT,
- &ctx->cc_flags);
- }
- }
- cfs_spin_unlock(&sec->ps_lock);
+ set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
+ if (!grace)
+ clear_bit(PTLRPC_CTX_UPTODATE_BIT,
+ &ctx->cc_flags);
+ }
+ }
+ spin_unlock(&sec->ps_lock);
- ctx_list_destroy_pf(&freelist);
- RETURN(busy);
+ ctx_list_destroy_pf(&freelist);
+ RETURN(busy);
}
/****************************************
/* all upcall messages linked here */
static cfs_list_t upcall_lists[MECH_MAX];
/* and protected by this */
-static cfs_spinlock_t upcall_locks[MECH_MAX];
+static spinlock_t upcall_locks[MECH_MAX];
static inline
void upcall_list_lock(int idx)
{
- cfs_spin_lock(&upcall_locks[idx]);
+ spin_lock(&upcall_locks[idx]);
}
static inline
void upcall_list_unlock(int idx)
{
- cfs_spin_unlock(&upcall_locks[idx]);
+ spin_unlock(&upcall_locks[idx]);
}
static
LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
sptlrpc_cli_ctx_expire(ctx);
- cfs_set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+ set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
}
}
ctx = &gctx->gc_base;
sptlrpc_cli_ctx_expire(ctx);
if (rc != -ERESTART || gss_err != GSS_S_COMPLETE)
- cfs_set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+ set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
CERROR("refresh ctx %p(uid %d) failed: %d/0x%08x: %s\n",
ctx, ctx->cc_vcred.vc_uid, rc, gss_err,
- cfs_test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags) ?
+ test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags) ?
"fatal error" : "non-fatal");
}
de_pipes[MECH_KRB5] = de;
CFS_INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]);
- cfs_spin_lock_init(&upcall_locks[MECH_KRB5]);
+ spin_lock_init(&upcall_locks[MECH_KRB5]);
- return 0;
+ return 0;
}
static
#define GSS_SVC_UPCALL_TIMEOUT (20)
-static cfs_spinlock_t __ctx_index_lock;
+static spinlock_t __ctx_index_lock;
static __u64 __ctx_index;
__u64 gss_get_next_ctx_index(void)
{
- __u64 idx;
+ __u64 idx;
- cfs_spin_lock(&__ctx_index_lock);
- idx = __ctx_index++;
- cfs_spin_unlock(&__ctx_index_lock);
+ spin_lock(&__ctx_index_lock);
+ idx = __ctx_index++;
+ spin_unlock(&__ctx_index_lock);
- return idx;
+ return idx;
}
static inline unsigned long hash_mem(char *buf, int length, int bits)
tmp->ctx.gsc_mechctx = NULL;
memset(&new->ctx.gsc_seqdata, 0, sizeof(new->ctx.gsc_seqdata));
- cfs_spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
+ spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
}
static void rsc_put(struct kref *ref)
goto out;
if (rv == -ENOENT) {
CERROR("NOENT? set rsc entry negative\n");
- cfs_set_bit(CACHE_NEGATIVE, &rsci.h.flags);
+ set_bit(CACHE_NEGATIVE, &rsci.h.flags);
} else {
rawobj_t tmp_buf;
unsigned long ctx_expiry;
int n;
ENTRY;
- cfs_write_lock(&rsc_cache.hash_lock);
+ write_lock(&rsc_cache.hash_lock);
for (n = 0; n < RSC_HASHMAX; n++) {
for (ch = &rsc_cache.hash_table[n]; *ch;) {
rscp = container_of(*ch, struct rsc, h);
*ch = (*ch)->next;
rscp->h.next = NULL;
cache_get(&rscp->h);
- cfs_set_bit(CACHE_NEGATIVE, &rscp->h.flags);
+ set_bit(CACHE_NEGATIVE, &rscp->h.flags);
COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
rsc_cache.entries--;
}
}
- cfs_write_unlock(&rsc_cache.hash_lock);
+ write_unlock(&rsc_cache.hash_lock);
EXIT;
}
first_check = 0;
read_lock(&rsi_cache.hash_lock);
- valid = cfs_test_bit(CACHE_VALID, &rsip->h.flags);
+ valid = test_bit(CACHE_VALID, &rsip->h.flags);
if (valid == 0)
cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
read_unlock(&rsi_cache.hash_lock);
if (rsci) {
/* if anything went wrong, we don't keep the context too */
if (rc != SECSVC_OK)
- cfs_set_bit(CACHE_NEGATIVE, &rsci->h.flags);
+ set_bit(CACHE_NEGATIVE, &rsci->h.flags);
else
CDEBUG(D_SEC, "create rsc with idx "LPX64"\n",
gss_handle_to_u64(&rsci->handle));
struct rsc *rsc = container_of(ctx, struct rsc, ctx);
/* can't be found */
- cfs_set_bit(CACHE_NEGATIVE, &rsc->h.flags);
+ set_bit(CACHE_NEGATIVE, &rsc->h.flags);
/* to be removed at next scan */
rsc->h.expiry_time = 1;
}
int __init gss_init_svc_upcall(void)
{
- int i;
+ int i;
- cfs_spin_lock_init(&__ctx_index_lock);
+ spin_lock_init(&__ctx_index_lock);
/*
* this helps reducing context index confliction. after server reboot,
* conflicting request from clients might be filtered out by initial
* statistic of "out-of-sequence-window"
*/
static struct {
- cfs_spinlock_t oos_lock;
+ spinlock_t oos_lock;
cfs_atomic_t oos_cli_count; /* client occurrence */
int oos_cli_behind; /* client max seqs behind */
cfs_atomic_t oos_svc_replay[3]; /* server replay detected */
void gss_stat_oos_record_cli(int behind)
{
- cfs_atomic_inc(&gss_stat_oos.oos_cli_count);
+ cfs_atomic_inc(&gss_stat_oos.oos_cli_count);
- cfs_spin_lock(&gss_stat_oos.oos_lock);
- if (behind > gss_stat_oos.oos_cli_behind)
- gss_stat_oos.oos_cli_behind = behind;
- cfs_spin_unlock(&gss_stat_oos.oos_lock);
+ spin_lock(&gss_stat_oos.oos_lock);
+ if (behind > gss_stat_oos.oos_cli_behind)
+ gss_stat_oos.oos_cli_behind = behind;
+ spin_unlock(&gss_stat_oos.oos_lock);
}
void gss_stat_oos_record_svc(int phase, int replay)
int gss_init_lproc(void)
{
- int rc;
+ int rc;
- cfs_spin_lock_init(&gss_stat_oos.oos_lock);
+ spin_lock_init(&gss_stat_oos.oos_lock);
gss_proc_root = lprocfs_register("gss", sptlrpc_proc_root,
gss_lprocfs_vars, NULL);
{
LASSERT(cfs_atomic_read(&ctx->cc_refcount));
- if (!cfs_test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
+ if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
if (!ctx->cc_early_expire)
- cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
CWARN("ctx %p(%u->%s) get expired: %lu(%+lds)\n",
ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
* someone else, in which case nobody will make further use
* of it. we don't care, and mark it UPTODATE will help
* destroying server side context when it be destroied. */
- cfs_set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
if (sec_is_reverse(ctx->cc_sec)) {
CWARN("server installed reverse ctx %p idx "LPX64", "
*/
switch (phase) {
case 0:
- if (cfs_test_bit(seq_num % win_size, window))
+ if (test_bit(seq_num % win_size, window))
goto replay;
break;
case 1:
*/
int gss_check_seq_num(struct gss_svc_seq_data *ssd, __u32 seq_num, int set)
{
- int rc = 0;
+ int rc = 0;
- cfs_spin_lock(&ssd->ssd_lock);
+ spin_lock(&ssd->ssd_lock);
if (set == 0) {
/*
gss_stat_oos_record_svc(2, 0);
}
exit:
- cfs_spin_unlock(&ssd->ssd_lock);
- return rc;
+ spin_unlock(&ssd->ssd_lock);
+ return rc;
}
/***************************************
return -EOPNOTSUPP;
}
- cfs_spin_lock_init(&gsec->gs_lock);
+ spin_lock_init(&gsec->gs_lock);
gsec->gs_rvs_hdl = 0ULL;
/* initialize upper ptlrpc_sec */
sec->ps_id = sptlrpc_get_next_secid();
sec->ps_flvr = *sf;
sec->ps_import = class_import_get(imp);
- cfs_spin_lock_init(&sec->ps_lock);
+ spin_lock_init(&sec->ps_lock);
CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
if (!svcctx) {
ctx->cc_expire = 0;
ctx->cc_flags = PTLRPC_CTX_NEW;
ctx->cc_vcred = *vcred;
- cfs_spin_lock_init(&ctx->cc_lock);
+ spin_lock_init(&ctx->cc_lock);
CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
} \
} while(0)
-#define IMPORT_SET_STATE(imp, state) \
-do { \
- cfs_spin_lock(&imp->imp_lock); \
- IMPORT_SET_STATE_NOLOCK(imp, state); \
- cfs_spin_unlock(&imp->imp_lock); \
+#define IMPORT_SET_STATE(imp, state) \
+do { \
+ spin_lock(&imp->imp_lock); \
+ IMPORT_SET_STATE_NOLOCK(imp, state); \
+ spin_unlock(&imp->imp_lock); \
} while(0)
* though. */
int ptlrpc_init_import(struct obd_import *imp)
{
- cfs_spin_lock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
- imp->imp_generation++;
- imp->imp_state = LUSTRE_IMP_NEW;
+ imp->imp_generation++;
+ imp->imp_state = LUSTRE_IMP_NEW;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_unlock(&imp->imp_lock);
- return 0;
+ return 0;
}
EXPORT_SYMBOL(ptlrpc_init_import);
*/
int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
{
- int rc = 0;
+ int rc = 0;
- cfs_spin_lock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
if (imp->imp_state == LUSTRE_IMP_FULL &&
(conn_cnt == 0 || conn_cnt == imp->imp_conn_cnt)) {
}
ptlrpc_deactivate_timeouts(imp);
IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
- cfs_spin_unlock(&imp->imp_lock);
+ spin_unlock(&imp->imp_lock);
- if (obd_dump_on_timeout)
- libcfs_debug_dumplog();
+ if (obd_dump_on_timeout)
+ libcfs_debug_dumplog();
- obd_import_event(imp->imp_obd, imp, IMP_EVENT_DISCON);
- rc = 1;
- } else {
- cfs_spin_unlock(&imp->imp_lock);
+ obd_import_event(imp->imp_obd, imp, IMP_EVENT_DISCON);
+ rc = 1;
+ } else {
+ spin_unlock(&imp->imp_lock);
CDEBUG(D_HA, "%s: import %p already %s (conn %u, was %u): %s\n",
imp->imp_client->cli_name, imp,
(imp->imp_state == LUSTRE_IMP_FULL &&
/* Must be called with imp_lock held! */
static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp)
{
- ENTRY;
- LASSERT_SPIN_LOCKED(&imp->imp_lock);
+ ENTRY;
+ LASSERT_SPIN_LOCKED(&imp->imp_lock);
- CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
- imp->imp_invalid = 1;
- imp->imp_generation++;
- cfs_spin_unlock(&imp->imp_lock);
+ CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
+ imp->imp_invalid = 1;
+ imp->imp_generation++;
+ spin_unlock(&imp->imp_lock);
- ptlrpc_abort_inflight(imp);
- obd_import_event(imp->imp_obd, imp, IMP_EVENT_INACTIVE);
+ ptlrpc_abort_inflight(imp);
+ obd_import_event(imp->imp_obd, imp, IMP_EVENT_INACTIVE);
- EXIT;
+ EXIT;
}
/*
*/
void ptlrpc_deactivate_import(struct obd_import *imp)
{
- cfs_spin_lock(&imp->imp_lock);
- ptlrpc_deactivate_and_unlock_import(imp);
+ spin_lock(&imp->imp_lock);
+ ptlrpc_deactivate_and_unlock_import(imp);
}
EXPORT_SYMBOL(ptlrpc_deactivate_import);
static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp)
{
- time_t now = cfs_time_current_sec();
- cfs_list_t *tmp, *n;
- struct ptlrpc_request *req;
- unsigned int timeout = 0;
-
- cfs_spin_lock(&imp->imp_lock);
- cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) {
- req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
- timeout = max(ptlrpc_inflight_deadline(req, now), timeout);
- }
- cfs_spin_unlock(&imp->imp_lock);
- return timeout;
+ time_t now = cfs_time_current_sec();
+ cfs_list_t *tmp, *n;
+ struct ptlrpc_request *req;
+ unsigned int timeout = 0;
+
+ spin_lock(&imp->imp_lock);
+ cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) {
+ req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
+ timeout = max(ptlrpc_inflight_deadline(req, now), timeout);
+ }
+ spin_unlock(&imp->imp_lock);
+ return timeout;
}
/**
cli_tgt, rc,
cfs_atomic_read(&imp->imp_inflight));
- cfs_spin_lock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
if (cfs_atomic_read(&imp->imp_inflight) == 0) {
int count = cfs_atomic_read(&imp->imp_unregistering);
cfs_atomic_read(&imp->
imp_unregistering));
}
- cfs_spin_unlock(&imp->imp_lock);
+ spin_unlock(&imp->imp_lock);
}
} while (rc != 0);
/* unset imp_invalid */
void ptlrpc_activate_import(struct obd_import *imp)
{
- struct obd_device *obd = imp->imp_obd;
+ struct obd_device *obd = imp->imp_obd;
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_invalid = 0;
- ptlrpc_activate_timeouts(imp);
- cfs_spin_unlock(&imp->imp_lock);
- obd_import_event(obd, imp, IMP_EVENT_ACTIVE);
+ spin_lock(&imp->imp_lock);
+ imp->imp_invalid = 0;
+ ptlrpc_activate_timeouts(imp);
+ spin_unlock(&imp->imp_lock);
+ obd_import_event(obd, imp, IMP_EVENT_ACTIVE);
}
EXPORT_SYMBOL(ptlrpc_activate_import);
CDEBUG(D_HA, "%s: waking up pinger\n",
obd2cli_tgt(imp->imp_obd));
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_force_verify = 1;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ imp->imp_force_verify = 1;
+ spin_unlock(&imp->imp_lock);
- ptlrpc_pinger_wake_up();
- }
- EXIT;
+ ptlrpc_pinger_wake_up();
+ }
+ EXIT;
}
EXPORT_SYMBOL(ptlrpc_fail_import);
int target_len, tried_all = 1;
ENTRY;
- cfs_spin_lock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
- if (cfs_list_empty(&imp->imp_conn_list)) {
- CERROR("%s: no connections available\n",
- imp->imp_obd->obd_name);
- cfs_spin_unlock(&imp->imp_lock);
- RETURN(-EINVAL);
- }
+ if (cfs_list_empty(&imp->imp_conn_list)) {
+ CERROR("%s: no connections available\n",
+ imp->imp_obd->obd_name);
+ spin_unlock(&imp->imp_lock);
+ RETURN(-EINVAL);
+ }
cfs_list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
CDEBUG(D_HA, "%s: connect to NID %s last attempt "LPU64"\n",
imp->imp_obd->obd_name, imp, imp_conn->oic_uuid.uuid,
libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
- cfs_spin_unlock(&imp->imp_lock);
+ spin_unlock(&imp->imp_lock);
- RETURN(0);
+ RETURN(0);
}
/*
int rc;
ENTRY;
- cfs_spin_lock(&imp->imp_lock);
- if (imp->imp_state == LUSTRE_IMP_CLOSED) {
- cfs_spin_unlock(&imp->imp_lock);
- CERROR("can't connect to a closed import\n");
- RETURN(-EINVAL);
- } else if (imp->imp_state == LUSTRE_IMP_FULL) {
- cfs_spin_unlock(&imp->imp_lock);
- CERROR("already connected\n");
- RETURN(0);
- } else if (imp->imp_state == LUSTRE_IMP_CONNECTING) {
- cfs_spin_unlock(&imp->imp_lock);
- CERROR("already connecting\n");
- RETURN(-EALREADY);
- }
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state == LUSTRE_IMP_CLOSED) {
+ spin_unlock(&imp->imp_lock);
+ CERROR("can't connect to a closed import\n");
+ RETURN(-EINVAL);
+ } else if (imp->imp_state == LUSTRE_IMP_FULL) {
+ spin_unlock(&imp->imp_lock);
+ CERROR("already connected\n");
+ RETURN(0);
+ } else if (imp->imp_state == LUSTRE_IMP_CONNECTING) {
+ spin_unlock(&imp->imp_lock);
+ CERROR("already connecting\n");
+ RETURN(-EALREADY);
+ }
IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CONNECTING);
set_transno = ptlrpc_first_transno(imp,
&imp->imp_connect_data.ocd_transno);
- cfs_spin_unlock(&imp->imp_lock);
+ spin_unlock(&imp->imp_lock);
rc = import_select_connection(imp);
if (rc)
aa->pcaa_initial_connect = initial_connect;
if (aa->pcaa_initial_connect) {
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_replayable = 1;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ imp->imp_replayable = 1;
+ spin_unlock(&imp->imp_lock);
lustre_msg_add_op_flags(request->rq_reqmsg,
MSG_CONNECT_INITIAL);
}
static void ptlrpc_maybe_ping_import_soon(struct obd_import *imp)
{
#ifdef __KERNEL__
- int force_verify;
+ int force_verify;
- cfs_spin_lock(&imp->imp_lock);
- force_verify = imp->imp_force_verify != 0;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ force_verify = imp->imp_force_verify != 0;
+ spin_unlock(&imp->imp_lock);
- if (force_verify)
- ptlrpc_pinger_wake_up();
+ if (force_verify)
+ ptlrpc_pinger_wake_up();
#else
/* liblustre has no pinger thread, so we wakeup pinger anyway */
- ptlrpc_pinger_wake_up();
+ ptlrpc_pinger_wake_up();
#endif
}
struct obd_connect_data *ocd;
struct obd_export *exp;
int ret;
- ENTRY;
+ ENTRY;
- cfs_spin_lock(&imp->imp_lock);
- if (imp->imp_state == LUSTRE_IMP_CLOSED) {
- cfs_spin_unlock(&imp->imp_lock);
- RETURN(0);
- }
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state == LUSTRE_IMP_CLOSED) {
+ spin_unlock(&imp->imp_lock);
+ RETURN(0);
+ }
- if (rc) {
- /* if this reconnect to busy export - not need select new target
- * for connecting*/
- imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc);
- cfs_spin_unlock(&imp->imp_lock);
- ptlrpc_maybe_ping_import_soon(imp);
- GOTO(out, rc);
- }
- cfs_spin_unlock(&imp->imp_lock);
+ if (rc) {
+ /* if this is a reconnect to a busy export, there is no need to
+ * select a new target for connecting */

+ imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc);
+ spin_unlock(&imp->imp_lock);
+ ptlrpc_maybe_ping_import_soon(imp);
+ GOTO(out, rc);
+ }
+ spin_unlock(&imp->imp_lock);
LASSERT(imp->imp_conn_current);
GOTO(out, rc);
}
- cfs_spin_lock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
/* All imports are pingable */
imp->imp_pingable = 1;
imp->imp_obd->obd_name, ocd->ocd_instance);
exp = class_conn2export(&imp->imp_dlm_handle);
- cfs_spin_unlock(&imp->imp_lock);
+ spin_unlock(&imp->imp_lock);
/* check that server granted subset of flags we asked for. */
if ((ocd->ocd_connect_flags & imp->imp_connect_flags_orig) !=
obd_import_event(imp->imp_obd, imp, IMP_EVENT_OCD);
- if (aa->pcaa_initial_connect) {
- cfs_spin_lock(&imp->imp_lock);
- if (msg_flags & MSG_CONNECT_REPLAYABLE) {
- imp->imp_replayable = 1;
- cfs_spin_unlock(&imp->imp_lock);
- CDEBUG(D_HA, "connected to replayable target: %s\n",
- obd2cli_tgt(imp->imp_obd));
- } else {
- imp->imp_replayable = 0;
- cfs_spin_unlock(&imp->imp_lock);
- }
+ if (aa->pcaa_initial_connect) {
+ spin_lock(&imp->imp_lock);
+ if (msg_flags & MSG_CONNECT_REPLAYABLE) {
+ imp->imp_replayable = 1;
+ spin_unlock(&imp->imp_lock);
+ CDEBUG(D_HA, "connected to replayable target: %s\n",
+ obd2cli_tgt(imp->imp_obd));
+ } else {
+ imp->imp_replayable = 0;
+ spin_unlock(&imp->imp_lock);
+ }
/* if applies, adjust the imp->imp_msg_magic here
* according to reply flags */
imp->imp_obd->obd_name,
obd2cli_tgt(imp->imp_obd));
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_resend_replay = 1;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ imp->imp_resend_replay = 1;
+ spin_unlock(&imp->imp_lock);
IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
} else {
}
} else {
- cfs_spin_lock(&imp->imp_lock);
- cfs_list_del(&imp->imp_conn_current->oic_item);
- cfs_list_add(&imp->imp_conn_current->oic_item,
- &imp->imp_conn_list);
- imp->imp_last_success_conn =
- imp->imp_conn_current->oic_last_attempt;
+ spin_lock(&imp->imp_lock);
+ cfs_list_del(&imp->imp_conn_current->oic_item);
+ cfs_list_add(&imp->imp_conn_current->oic_item,
+ &imp->imp_conn_list);
+ imp->imp_last_success_conn =
+ imp->imp_conn_current->oic_last_attempt;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_unlock(&imp->imp_lock);
if (!ocd->ocd_ibits_known &&
ocd->ocd_connect_flags & OBD_CONNECT_IBITS)
obd2cli_tgt(imp->imp_obd),
imp->imp_connection->c_remote_uuid.uuid);
/* reset vbr_failed flag upon eviction */
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_vbr_failed = 0;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ imp->imp_vbr_failed = 0;
+ spin_unlock(&imp->imp_lock);
#ifdef __KERNEL__
/* bug 17802: XXX client_disconnect_export vs connect request
}
- cfs_spin_lock(&imp->imp_lock);
- if (imp->imp_state != LUSTRE_IMP_FULL)
- GOTO(out, 0);
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state != LUSTRE_IMP_FULL)
+ GOTO(out, 0);
- cfs_spin_unlock(&imp->imp_lock);
+ spin_unlock(&imp->imp_lock);
req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_DISCONNECT,
LUSTRE_OBD_VERSION, rq_opc);
}
set_state:
- cfs_spin_lock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
out:
- if (noclose)
- IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
- else
- IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
- memset(&imp->imp_remote_handle, 0, sizeof(imp->imp_remote_handle));
- cfs_spin_unlock(&imp->imp_lock);
-
- RETURN(rc);
+ if (noclose)
+ IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
+ else
+ IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
+ memset(&imp->imp_remote_handle, 0, sizeof(imp->imp_remote_handle));
+ spin_unlock(&imp->imp_lock);
+
+ RETURN(rc);
}
EXPORT_SYMBOL(ptlrpc_disconnect_import);
void ptlrpc_cleanup_imp(struct obd_import *imp)
{
- ENTRY;
+ ENTRY;
- cfs_spin_lock(&imp->imp_lock);
- IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
- imp->imp_generation++;
- cfs_spin_unlock(&imp->imp_lock);
- ptlrpc_abort_inflight(imp);
+ spin_lock(&imp->imp_lock);
+ IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
+ imp->imp_generation++;
+ spin_unlock(&imp->imp_lock);
+ ptlrpc_abort_inflight(imp);
- EXIT;
+ EXIT;
}
EXPORT_SYMBOL(ptlrpc_cleanup_imp);
drop to 0, and because 0 could mean an error */
return 0;
- cfs_spin_lock(&at->at_lock);
+ spin_lock(&at->at_lock);
if (unlikely(at->at_binstart == 0)) {
/* Special case to remove default from history */
/* if we changed, report the old value */
old = (at->at_current != old) ? old : 0;
- cfs_spin_unlock(&at->at_lock);
+ spin_unlock(&at->at_lock);
return old;
}
}
/* Not found in list, add it under a lock */
- cfs_spin_lock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
/* Check unused under lock */
for (; i < IMP_AT_MAX_PORTALS; i++) {
at->iat_portal[i] = portal;
out:
- cfs_spin_unlock(&imp->imp_lock);
- return i;
+ spin_unlock(&imp->imp_lock);
+ return i;
}
#include <libcfs/list.h>
#define LLOG_CLIENT_ENTRY(ctxt, imp) do { \
- cfs_mutex_lock(&ctxt->loc_mutex); \
+ mutex_lock(&ctxt->loc_mutex); \
if (ctxt->loc_imp) { \
imp = class_import_get(ctxt->loc_imp); \
} else { \
"but I'll try again next time. Not fatal.\n", \
ctxt->loc_idx); \
imp = NULL; \
- cfs_mutex_unlock(&ctxt->loc_mutex); \
+ mutex_unlock(&ctxt->loc_mutex); \
return (-EINVAL); \
} \
- cfs_mutex_unlock(&ctxt->loc_mutex); \
+ mutex_unlock(&ctxt->loc_mutex); \
} while(0)
#define LLOG_CLIENT_EXIT(ctxt, imp) do { \
- cfs_mutex_lock(&ctxt->loc_mutex); \
+ mutex_lock(&ctxt->loc_mutex); \
if (ctxt->loc_imp != imp) \
CWARN("loc_imp has changed from %p to %p\n", \
ctxt->loc_imp, imp); \
class_import_put(imp); \
- cfs_mutex_unlock(&ctxt->loc_mutex); \
+ mutex_unlock(&ctxt->loc_mutex); \
} while(0)
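For reference, a minimal sketch of the native mutex API that replaces the cfs_mutex_* wrappers in the llog and pinger code; not part of the patch, demo_* names are invented.

#include <linux/mutex.h>

static DEFINE_MUTEX(demo_mutex);	/* static initialization */
static void *demo_imp;			/* protected by demo_mutex */

static int demo_get_import(void **impp)
{
	mutex_lock(&demo_mutex);	/* may sleep; not usable in atomic context */
	if (!demo_imp) {
		mutex_unlock(&demo_mutex);
		return -EINVAL;
	}
	*impp = demo_imp;
	mutex_unlock(&demo_mutex);
	return 0;
}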
/* This is a callback from the llog_* functions.
ENTRY;
LASSERT(ctxt);
- cfs_mutex_lock(&ctxt->loc_mutex);
+ mutex_lock(&ctxt->loc_mutex);
if (ctxt->loc_imp != imp) {
if (ctxt->loc_imp) {
CWARN("changing the import %p - %p\n",
}
ctxt->loc_imp = class_import_get(imp);
}
- cfs_mutex_unlock(&ctxt->loc_mutex);
+ mutex_unlock(&ctxt->loc_mutex);
RETURN(0);
}
EXPORT_SYMBOL(llog_receptor_accept);
new_imp = ctxt->loc_obd->u.cli.cl_import;
LASSERTF(ctxt->loc_imp == NULL || ctxt->loc_imp == new_imp,
"%p - %p\n", ctxt->loc_imp, new_imp);
- cfs_mutex_lock(&ctxt->loc_mutex);
+ mutex_lock(&ctxt->loc_mutex);
if (ctxt->loc_imp != new_imp) {
if (ctxt->loc_imp)
class_import_put(ctxt->loc_imp);
ctxt->loc_imp = class_import_get(new_imp);
}
- cfs_mutex_unlock(&ctxt->loc_mutex);
+ mutex_unlock(&ctxt->loc_mutex);
RETURN(0);
}
EXPORT_SYMBOL(llog_initiator_connect);
if (val > cfs_num_physpages/(2 * bufpages))
return -ERANGE;
- cfs_spin_lock(&svc->srv_lock);
+ spin_lock(&svc->srv_lock);
if (val == 0)
svc->srv_hist_nrqbds_cpt_max = 0;
else
svc->srv_hist_nrqbds_cpt_max = max(1, (val / svc->srv_ncpts));
- cfs_spin_unlock(&svc->srv_lock);
+ spin_unlock(&svc->srv_lock);
return count;
}
if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT)
return -ERANGE;
- cfs_spin_lock(&svc->srv_lock);
+ spin_lock(&svc->srv_lock);
if (val > svc->srv_nthrs_cpt_limit * svc->srv_ncpts) {
- cfs_spin_unlock(&svc->srv_lock);
+ spin_unlock(&svc->srv_lock);
return -ERANGE;
}
svc->srv_nthrs_cpt_init = val / svc->srv_ncpts;
- cfs_spin_unlock(&svc->srv_lock);
+ spin_unlock(&svc->srv_lock);
return count;
}
if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT)
return -ERANGE;
- cfs_spin_lock(&svc->srv_lock);
+ spin_lock(&svc->srv_lock);
if (val < svc->srv_nthrs_cpt_init * svc->srv_ncpts) {
- cfs_spin_unlock(&svc->srv_lock);
+ spin_unlock(&svc->srv_lock);
return -ERANGE;
}
svc->srv_nthrs_cpt_limit = val / svc->srv_ncpts;
- cfs_spin_unlock(&svc->srv_lock);
+ spin_unlock(&svc->srv_lock);
return count;
}
ptlrpc_service_for_each_part(svcpt, i, svc) {
srhi->srhi_idx = i;
- cfs_spin_lock(&svcpt->scp_lock);
+ spin_lock(&svcpt->scp_lock);
rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, *pos);
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
if (rc == 0) {
*pos = srhi->srhi_seq;
return srhi;
srhi->srhi_idx = i;
- cfs_spin_lock(&svcpt->scp_lock);
+ spin_lock(&svcpt->scp_lock);
rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, *pos + 1);
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
if (rc == 0)
break;
}
svcpt = svc->srv_parts[srhi->srhi_idx];
- cfs_spin_lock(&svcpt->scp_lock);
+ spin_lock(&svcpt->scp_lock);
rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, srhi->srhi_seq);
svc->srv_ops.so_req_printer(s, srhi->srhi_req);
}
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
return rc;
}
if (val < 0)
return -ERANGE;
- cfs_spin_lock(&svc->srv_lock);
+ spin_lock(&svc->srv_lock);
svc->srv_hpreq_ratio = val;
- cfs_spin_unlock(&svc->srv_lock);
+ spin_unlock(&svc->srv_lock);
return count;
}
return -ERANGE;
LPROCFS_CLIMP_CHECK(obd);
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_no_pinger_recover = !val;
- cfs_spin_unlock(&imp->imp_lock);
- LPROCFS_CLIMP_EXIT(obd);
+ spin_lock(&imp->imp_lock);
+ imp->imp_no_pinger_recover = !val;
+ spin_unlock(&imp->imp_lock);
+ LPROCFS_CLIMP_EXIT(obd);
- return count;
+ return count;
}
EXPORT_SYMBOL(lprocfs_wr_pinger_recov);
}
}
- cfs_spin_lock(&request->rq_lock);
+ spin_lock(&request->rq_lock);
/* If the MD attach succeeds, there _will_ be a reply_in callback */
request->rq_receiving_reply = !noreply;
/* We are responsible for unlinking the reply buffer */
request->rq_resend = 0;
request->rq_restart = 0;
request->rq_reply_truncate = 0;
- cfs_spin_unlock(&request->rq_lock);
+ spin_unlock(&request->rq_lock);
if (!noreply) {
reply_md.start = request->rq_repbuf;
if (rc != 0) {
CERROR("LNetMDAttach failed: %d\n", rc);
LASSERT (rc == -ENOMEM);
- cfs_spin_lock(&request->rq_lock);
- /* ...but the MD attach didn't succeed... */
- request->rq_receiving_reply = 0;
- cfs_spin_unlock(&request->rq_lock);
+ spin_lock(&request->rq_lock);
+ /* ...but the MD attach didn't succeed... */
+ request->rq_receiving_reply = 0;
+ spin_unlock(&request->rq_lock);
GOTO(cleanup_me, rc = -ENOMEM);
}
#if RS_DEBUG
CFS_LIST_HEAD(ptlrpc_rs_debug_lru);
-cfs_spinlock_t ptlrpc_rs_debug_lock;
+spinlock_t ptlrpc_rs_debug_lock;
-#define PTLRPC_RS_DEBUG_LRU_ADD(rs) \
-do { \
- cfs_spin_lock(&ptlrpc_rs_debug_lock); \
- cfs_list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru); \
- cfs_spin_unlock(&ptlrpc_rs_debug_lock); \
+#define PTLRPC_RS_DEBUG_LRU_ADD(rs) \
+do { \
+ spin_lock(&ptlrpc_rs_debug_lock); \
+ cfs_list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru); \
+ spin_unlock(&ptlrpc_rs_debug_lock); \
} while (0)
-#define PTLRPC_RS_DEBUG_LRU_DEL(rs) \
-do { \
- cfs_spin_lock(&ptlrpc_rs_debug_lock); \
- cfs_list_del(&(rs)->rs_debug_list); \
- cfs_spin_unlock(&ptlrpc_rs_debug_lock); \
+#define PTLRPC_RS_DEBUG_LRU_DEL(rs) \
+do { \
+ spin_lock(&ptlrpc_rs_debug_lock); \
+ cfs_list_del(&(rs)->rs_debug_list); \
+ spin_unlock(&ptlrpc_rs_debug_lock); \
} while (0)
#else
# define PTLRPC_RS_DEBUG_LRU_ADD(rs) do {} while(0)
{
struct ptlrpc_reply_state *rs = NULL;
- cfs_spin_lock(&svcpt->scp_rep_lock);
+ spin_lock(&svcpt->scp_rep_lock);
/* See if we have anything in a pool, and wait if nothing */
while (cfs_list_empty(&svcpt->scp_rep_idle)) {
struct l_wait_info lwi;
int rc;
- cfs_spin_unlock(&svcpt->scp_rep_lock);
+ spin_unlock(&svcpt->scp_rep_lock);
/* If we cannot get anything for some long time, we better
* bail out instead of waiting infinitely */
lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL);
!cfs_list_empty(&svcpt->scp_rep_idle), &lwi);
if (rc != 0)
goto out;
- cfs_spin_lock(&svcpt->scp_rep_lock);
+ spin_lock(&svcpt->scp_rep_lock);
}
rs = cfs_list_entry(svcpt->scp_rep_idle.next,
struct ptlrpc_reply_state, rs_list);
cfs_list_del(&rs->rs_list);
- cfs_spin_unlock(&svcpt->scp_rep_lock);
+ spin_unlock(&svcpt->scp_rep_lock);
LASSERT(rs != NULL);
memset(rs, 0, svcpt->scp_service->srv_max_reply_size);
{
struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
- cfs_spin_lock(&svcpt->scp_rep_lock);
+ spin_lock(&svcpt->scp_rep_lock);
cfs_list_add(&rs->rs_list, &svcpt->scp_rep_idle);
- cfs_spin_unlock(&svcpt->scp_rep_lock);
+ spin_unlock(&svcpt->scp_rep_lock);
cfs_waitq_signal(&svcpt->scp_rep_waitq);
}
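The reply-state handling above keeps idle objects on a list guarded by a spinlock; a self-contained sketch of that pattern with native list_head primitives is shown below (illustrative only, demo_* names invented, waitqueue signalling omitted).

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_rs {
	struct list_head	link;
};

struct demo_pool {
	spinlock_t		lock;	/* protects the idle list */
	struct list_head	idle;
};

static void demo_pool_init(struct demo_pool *pool)
{
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->idle);
}

static struct demo_rs *demo_pool_get(struct demo_pool *pool)
{
	struct demo_rs *rs = NULL;

	spin_lock(&pool->lock);
	if (!list_empty(&pool->idle)) {
		rs = list_first_entry(&pool->idle, struct demo_rs, link);
		list_del(&rs->link);
	}
	spin_unlock(&pool->lock);
	return rs;
}

static void demo_pool_put(struct demo_pool *pool, struct demo_rs *rs)
{
	spin_lock(&pool->lock);
	list_add(&rs->link, &pool->idle);
	spin_unlock(&pool->lock);
}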
LASSERT(req->rq_reply_state == NULL);
if ((flags & LPRFL_EARLY_REPLY) == 0) {
- cfs_spin_lock(&req->rq_lock);
- req->rq_packed_final = 1;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_packed_final = 1;
+ spin_unlock(&req->rq_lock);
}
msg_len = lustre_msg_size_v2(count, lens);
CFS_INIT_LIST_HEAD(&rs->rs_exp_list);
CFS_INIT_LIST_HEAD(&rs->rs_obd_list);
CFS_INIT_LIST_HEAD(&rs->rs_list);
- cfs_spin_lock_init(&rs->rs_lock);
+ spin_lock_init(&rs->rs_lock);
req->rq_replen = msg_len;
req->rq_reply_state = rs;
#include <obd_class.h>
#include "ptlrpc_internal.h"
-cfs_mutex_t pinger_mutex;
+struct mutex pinger_mutex;
static CFS_LIST_HEAD(pinger_imports);
static cfs_list_t timeout_list = CFS_LIST_HEAD_INIT(timeout_list);
struct ptlrpc_request *
cfs_time_t timeout = PING_INTERVAL;
/* The timeout list is a increase order sorted list */
- cfs_mutex_lock(&pinger_mutex);
+ mutex_lock(&pinger_mutex);
cfs_list_for_each_entry(item, &timeout_list, ti_chain) {
int ti_timeout = item->ti_timeout;
if (timeout > ti_timeout)
timeout = ti_timeout;
break;
}
- cfs_mutex_unlock(&pinger_mutex);
+ mutex_unlock(&pinger_mutex);
return cfs_time_sub(cfs_time_add(time, cfs_time_seconds(timeout)),
cfs_time_current());
static void ptlrpc_pinger_process_import(struct obd_import *imp,
unsigned long this_ping)
{
- int force, level;
+ int force, level;
- cfs_spin_lock(&imp->imp_lock);
- level = imp->imp_state;
- force = imp->imp_force_verify;
- if (force)
- imp->imp_force_verify = 0;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ level = imp->imp_state;
+ force = imp->imp_force_verify;
+ if (force)
+ imp->imp_force_verify = 0;
+ spin_unlock(&imp->imp_lock);
CDEBUG(level == LUSTRE_IMP_FULL ? D_INFO : D_HA,
"level %s/%u force %u deactive %u pingable %u\n",
struct timeout_item *item;
cfs_list_t *iter;
- cfs_mutex_lock(&pinger_mutex);
+ mutex_lock(&pinger_mutex);
cfs_list_for_each_entry(item, &timeout_list, ti_chain) {
item->ti_cb(item, item->ti_cb_data);
}
cfs_time_seconds(PING_INTERVAL))))
ptlrpc_update_next_ping(imp, 0);
}
- cfs_mutex_unlock(&pinger_mutex);
+ mutex_unlock(&pinger_mutex);
/* update memory usage info */
obd_update_maxusage();
RETURN(-EALREADY);
ptlrpc_pinger_remove_timeouts();
- cfs_mutex_lock(&pinger_mutex);
+ mutex_lock(&pinger_mutex);
thread_set_flags(pinger_thread, SVC_STOPPING);
cfs_waitq_signal(&pinger_thread->t_ctl_waitq);
- cfs_mutex_unlock(&pinger_mutex);
+ mutex_unlock(&pinger_mutex);
l_wait_event(pinger_thread->t_ctl_waitq,
thread_is_stopped(pinger_thread), &lwi);
if (!cfs_list_empty(&imp->imp_pinger_chain))
RETURN(-EALREADY);
- cfs_mutex_lock(&pinger_mutex);
+ mutex_lock(&pinger_mutex);
CDEBUG(D_HA, "adding pingable import %s->%s\n",
imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
/* if we add to pinger we want recovery on this import */
class_import_get(imp);
ptlrpc_pinger_wake_up();
- cfs_mutex_unlock(&pinger_mutex);
+ mutex_unlock(&pinger_mutex);
RETURN(0);
}
if (cfs_list_empty(&imp->imp_pinger_chain))
RETURN(-ENOENT);
- cfs_mutex_lock(&pinger_mutex);
+ mutex_lock(&pinger_mutex);
cfs_list_del_init(&imp->imp_pinger_chain);
CDEBUG(D_HA, "removing pingable import %s->%s\n",
imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
/* if we remove from pinger we don't want recovery on this import */
imp->imp_obd->obd_no_recov = 1;
class_import_put(imp);
- cfs_mutex_unlock(&pinger_mutex);
+ mutex_unlock(&pinger_mutex);
RETURN(0);
}
EXPORT_SYMBOL(ptlrpc_pinger_del_import);
{
struct timeout_item *ti;
- cfs_mutex_lock(&pinger_mutex);
+ mutex_lock(&pinger_mutex);
ti = ptlrpc_pinger_register_timeout(time, event, cb, data);
if (!ti) {
- cfs_mutex_unlock(&pinger_mutex);
+ mutex_unlock(&pinger_mutex);
return (-EINVAL);
}
cfs_list_add(obd_list, &ti->ti_obd_list);
- cfs_mutex_unlock(&pinger_mutex);
+ mutex_unlock(&pinger_mutex);
return 0;
}
EXPORT_SYMBOL(ptlrpc_add_timeout_client);
if (cfs_list_empty(obd_list))
return 0;
- cfs_mutex_lock(&pinger_mutex);
+ mutex_lock(&pinger_mutex);
cfs_list_del_init(obd_list);
/**
* If there are no obd attached to the timeout event
cfs_list_del(&ti->ti_chain);
OBD_FREE_PTR(ti);
}
- cfs_mutex_unlock(&pinger_mutex);
+ mutex_unlock(&pinger_mutex);
return 0;
}
EXPORT_SYMBOL(ptlrpc_del_timeout_client);
{
struct timeout_item *item, *tmp;
- cfs_mutex_lock(&pinger_mutex);
+ mutex_lock(&pinger_mutex);
cfs_list_for_each_entry_safe(item, tmp, &timeout_list, ti_chain) {
LASSERT(cfs_list_empty(&item->ti_obd_list));
cfs_list_del(&item->ti_chain);
OBD_FREE_PTR(item);
}
- cfs_mutex_unlock(&pinger_mutex);
+ mutex_unlock(&pinger_mutex);
return 0;
}
int ping_evictor_wake(struct obd_export *exp)
{
- struct obd_device *obd;
+ struct obd_device *obd;
- cfs_spin_lock(&pet_lock);
- if (pet_state != PET_READY) {
- /* eventually the new obd will call here again. */
- cfs_spin_unlock(&pet_lock);
- return 1;
- }
+ spin_lock(&pet_lock);
+ if (pet_state != PET_READY) {
+ /* eventually the new obd will call here again. */
+ spin_unlock(&pet_lock);
+ return 1;
+ }
- obd = class_exp2obd(exp);
- if (cfs_list_empty(&obd->obd_evict_list)) {
- class_incref(obd, "evictor", obd);
- cfs_list_add(&obd->obd_evict_list, &pet_list);
- }
- cfs_spin_unlock(&pet_lock);
+ obd = class_exp2obd(exp);
+ if (cfs_list_empty(&obd->obd_evict_list)) {
+ class_incref(obd, "evictor", obd);
+ cfs_list_add(&obd->obd_evict_list, &pet_list);
+ }
+ spin_unlock(&pet_lock);
- cfs_waitq_signal(&pet_waitq);
- return 0;
+ cfs_waitq_signal(&pet_waitq);
+ return 0;
}
static int ping_evictor_main(void *arg)
/* we only get here if pet_exp != NULL, and the end of this
* loop is the only place which sets it NULL again, so lock
* is not strictly necessary. */
- cfs_spin_lock(&pet_lock);
- obd = cfs_list_entry(pet_list.next, struct obd_device,
- obd_evict_list);
- cfs_spin_unlock(&pet_lock);
+ spin_lock(&pet_lock);
+ obd = cfs_list_entry(pet_list.next, struct obd_device,
+ obd_evict_list);
+ spin_unlock(&pet_lock);
expire_time = cfs_time_current_sec() - PING_EVICT_TIMEOUT;
* the obd lock (class_unlink_export), which means we can't
* lose the last ref on the export. If they've already been
* removed from the list, we won't find them here. */
- cfs_spin_lock(&obd->obd_dev_lock);
- while (!cfs_list_empty(&obd->obd_exports_timed)) {
- exp = cfs_list_entry(obd->obd_exports_timed.next,
- struct obd_export,
- exp_obd_chain_timed);
- if (expire_time > exp->exp_last_request_time) {
- class_export_get(exp);
- cfs_spin_unlock(&obd->obd_dev_lock);
- LCONSOLE_WARN("%s: haven't heard from client %s"
+ spin_lock(&obd->obd_dev_lock);
+ while (!cfs_list_empty(&obd->obd_exports_timed)) {
+ exp = cfs_list_entry(obd->obd_exports_timed.next,
+ struct obd_export,
+ exp_obd_chain_timed);
+ if (expire_time > exp->exp_last_request_time) {
+ class_export_get(exp);
+ spin_unlock(&obd->obd_dev_lock);
+ LCONSOLE_WARN("%s: haven't heard from client %s"
" (at %s) in %ld seconds. I think"
" it's dead, and I am evicting"
" it. exp %p, cur %ld expire %ld"
exp->exp_last_request_time);
class_fail_export(exp);
class_export_put(exp);
- cfs_spin_lock(&obd->obd_dev_lock);
- } else {
- /* List is sorted, so everyone below is ok */
- break;
- }
- }
- cfs_spin_unlock(&obd->obd_dev_lock);
-
- cfs_spin_lock(&pet_lock);
- cfs_list_del_init(&obd->obd_evict_list);
- cfs_spin_unlock(&pet_lock);
+ spin_lock(&obd->obd_dev_lock);
+ } else {
+ /* List is sorted, so everyone below is ok */
+ break;
+ }
+ }
+ spin_unlock(&obd->obd_dev_lock);
+
+ spin_lock(&pet_lock);
+ cfs_list_del_init(&obd->obd_evict_list);
+ spin_unlock(&pet_lock);
class_decref(obd, "evictor", obd);
}
set = pd->pd_set;
/* add rpcs into set */
- cfs_mutex_lock(&pinger_mutex);
+ mutex_lock(&pinger_mutex);
cfs_list_for_each(iter, &pinger_imports) {
struct obd_import *imp = cfs_list_entry(iter, struct obd_import,
imp_pinger_chain);
if (cfs_time_aftereq(pd->pd_this_ping,
imp->imp_next_ping - 5 * CFS_TICK)) {
/* Add a ping. */
- cfs_spin_lock(&imp->imp_lock);
- generation = imp->imp_generation;
- level = imp->imp_state;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ generation = imp->imp_generation;
+ level = imp->imp_state;
+ spin_unlock(&imp->imp_lock);
if (level != LUSTRE_IMP_FULL) {
CDEBUG(D_HA,
}
}
pd->pd_this_ping = curtime;
- cfs_mutex_unlock(&pinger_mutex);
+ mutex_unlock(&pinger_mutex);
/* Might be empty, that's OK. */
if (cfs_atomic_read(&set->set_remaining) == 0)
}
/* Expire all the requests that didn't come back. */
- cfs_mutex_lock(&pinger_mutex);
+ mutex_lock(&pinger_mutex);
cfs_list_for_each(iter, &set->set_requests) {
req = cfs_list_entry(iter, struct ptlrpc_request,
rq_set_chain);
* phase and take care of inflights. */
ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
imp = req->rq_import;
- cfs_spin_lock(&imp->imp_lock);
- if (!cfs_list_empty(&req->rq_list)) {
- cfs_list_del_init(&req->rq_list);
- cfs_atomic_dec(&imp->imp_inflight);
- }
- cfs_spin_unlock(&imp->imp_lock);
- cfs_atomic_dec(&set->set_remaining);
- }
- cfs_mutex_unlock(&pinger_mutex);
+ spin_lock(&imp->imp_lock);
+ if (!cfs_list_empty(&req->rq_list)) {
+ cfs_list_del_init(&req->rq_list);
+ cfs_atomic_dec(&imp->imp_inflight);
+ }
+ spin_unlock(&imp->imp_lock);
+ cfs_atomic_dec(&set->set_remaining);
+ }
+ mutex_unlock(&pinger_mutex);
ptlrpc_set_destroy(set);
pd->pd_set = NULL;
void ptlrpc_pinger_sending_on_import(struct obd_import *imp)
{
#ifdef ENABLE_PINGER
- cfs_mutex_lock(&pinger_mutex);
+ mutex_lock(&pinger_mutex);
ptlrpc_update_next_ping(imp, 0);
if (pinger_args.pd_set == NULL &&
cfs_time_before(imp->imp_next_ping, pinger_args.pd_next_ping)) {
imp->imp_next_ping, cfs_time_current());
pinger_args.pd_next_ping = imp->imp_next_ping;
}
- cfs_mutex_unlock(&pinger_mutex);
+ mutex_unlock(&pinger_mutex);
#endif
}
void ptlrpc_pinger_commit_expected(struct obd_import *imp)
{
#ifdef ENABLE_PINGER
- cfs_mutex_lock(&pinger_mutex);
+ mutex_lock(&pinger_mutex);
ptlrpc_update_next_ping(imp, 1);
if (pinger_args.pd_set == NULL &&
cfs_time_before(imp->imp_next_ping, pinger_args.pd_next_ping)) {
imp->imp_next_ping, cfs_time_current());
pinger_args.pd_next_ping = imp->imp_next_ping;
}
- cfs_mutex_unlock(&pinger_mutex);
+ mutex_unlock(&pinger_mutex);
#endif
}
imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
ptlrpc_pinger_sending_on_import(imp);
- cfs_mutex_lock(&pinger_mutex);
+ mutex_lock(&pinger_mutex);
cfs_list_add_tail(&imp->imp_pinger_chain, &pinger_imports);
class_import_get(imp);
- cfs_mutex_unlock(&pinger_mutex);
+ mutex_unlock(&pinger_mutex);
RETURN(0);
}
if (cfs_list_empty(&imp->imp_pinger_chain))
RETURN(-ENOENT);
- cfs_mutex_lock(&pinger_mutex);
+ mutex_lock(&pinger_mutex);
cfs_list_del_init(&imp->imp_pinger_chain);
CDEBUG(D_HA, "removing pingable import %s->%s\n",
imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
class_import_put(imp);
- cfs_mutex_unlock(&pinger_mutex);
+ mutex_unlock(&pinger_mutex);
RETURN(0);
}
#include "ptlrpc_internal.h"
-extern cfs_spinlock_t ptlrpc_last_xid_lock;
+extern spinlock_t ptlrpc_last_xid_lock;
#if RS_DEBUG
-extern cfs_spinlock_t ptlrpc_rs_debug_lock;
+extern spinlock_t ptlrpc_rs_debug_lock;
#endif
-extern cfs_spinlock_t ptlrpc_all_services_lock;
-extern cfs_mutex_t pinger_mutex;
-extern cfs_mutex_t ptlrpcd_mutex;
+extern spinlock_t ptlrpc_all_services_lock;
+extern struct mutex pinger_mutex;
+extern struct mutex ptlrpcd_mutex;
__init int ptlrpc_init(void)
{
lustre_assert_wire_constants();
#if RS_DEBUG
- cfs_spin_lock_init(&ptlrpc_rs_debug_lock);
+ spin_lock_init(&ptlrpc_rs_debug_lock);
#endif
- cfs_spin_lock_init(&ptlrpc_all_services_lock);
- cfs_mutex_init(&pinger_mutex);
- cfs_mutex_init(&ptlrpcd_mutex);
+ spin_lock_init(&ptlrpc_all_services_lock);
+ mutex_init(&pinger_mutex);
+ mutex_init(&ptlrpcd_mutex);
ptlrpc_init_xid();
rc = req_layout_init();
#endif
static struct ptlrpcd *ptlrpcds;
-cfs_mutex_t ptlrpcd_mutex;
+struct mutex ptlrpcd_mutex;
static int ptlrpcd_users = 0;
void ptlrpcd_wake(struct ptlrpc_request *req)
}
#ifdef __KERNEL__
- cfs_spin_lock(&new->set_new_req_lock);
- cfs_list_splice_init(&set->set_requests, &new->set_new_requests);
- i = cfs_atomic_read(&set->set_remaining);
- count = cfs_atomic_add_return(i, &new->set_new_count);
- cfs_atomic_set(&set->set_remaining, 0);
- cfs_spin_unlock(&new->set_new_req_lock);
+ spin_lock(&new->set_new_req_lock);
+ cfs_list_splice_init(&set->set_requests, &new->set_new_requests);
+ i = cfs_atomic_read(&set->set_remaining);
+ count = cfs_atomic_add_return(i, &new->set_new_count);
+ cfs_atomic_set(&set->set_remaining, 0);
+ spin_unlock(&new->set_new_req_lock);
if (count == i) {
cfs_waitq_signal(&new->set_waitq);
struct ptlrpc_request *req;
int rc = 0;
- cfs_spin_lock(&src->set_new_req_lock);
+ spin_lock(&src->set_new_req_lock);
if (likely(!cfs_list_empty(&src->set_new_requests))) {
cfs_list_for_each_safe(pos, tmp, &src->set_new_requests) {
req = cfs_list_entry(pos, struct ptlrpc_request,
cfs_atomic_add(rc, &des->set_remaining);
cfs_atomic_set(&src->set_new_count, 0);
}
- cfs_spin_unlock(&src->set_new_req_lock);
- return rc;
+ spin_unlock(&src->set_new_req_lock);
+ return rc;
}
#endif
if (req->rq_reqmsg)
lustre_msg_set_jobid(req->rq_reqmsg, NULL);
- cfs_spin_lock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
if (req->rq_invalid_rqset) {
struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
back_to_sleep, NULL);
req->rq_invalid_rqset = 0;
- cfs_spin_unlock(&req->rq_lock);
+ spin_unlock(&req->rq_lock);
l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
} else if (req->rq_set) {
/* If we have a valid "rq_set", just reuse it to avoid double
/* ptlrpc_check_set will decrease the count */
cfs_atomic_inc(&req->rq_set->set_remaining);
- cfs_spin_unlock(&req->rq_lock);
- cfs_waitq_signal(&req->rq_set->set_waitq);
- return;
- } else {
- cfs_spin_unlock(&req->rq_lock);
+ spin_unlock(&req->rq_lock);
+ cfs_waitq_signal(&req->rq_set->set_waitq);
+ return;
+ } else {
+ spin_unlock(&req->rq_lock);
}
pc = ptlrpcd_select_pc(req, policy, idx);
ENTRY;
if (cfs_atomic_read(&set->set_new_count)) {
- cfs_spin_lock(&set->set_new_req_lock);
+ spin_lock(&set->set_new_req_lock);
if (likely(!cfs_list_empty(&set->set_new_requests))) {
cfs_list_splice_init(&set->set_new_requests,
&set->set_requests);
*/
rc = 1;
}
- cfs_spin_unlock(&set->set_new_req_lock);
+ spin_unlock(&set->set_new_req_lock);
}
/* We should call lu_env_refill() before handling new requests to make
if (partner == NULL)
continue;
- cfs_spin_lock(&partner->pc_lock);
- ps = partner->pc_set;
- if (ps == NULL) {
- cfs_spin_unlock(&partner->pc_lock);
- continue;
- }
+ spin_lock(&partner->pc_lock);
+ ps = partner->pc_set;
+ if (ps == NULL) {
+ spin_unlock(&partner->pc_lock);
+ continue;
+ }
- ptlrpc_reqset_get(ps);
- cfs_spin_unlock(&partner->pc_lock);
+ ptlrpc_reqset_get(ps);
+ spin_unlock(&partner->pc_lock);
if (cfs_atomic_read(&ps->set_new_count)) {
rc = ptlrpcd_steal_rqset(set, ps);
cfs_daemonize_ctxt(pc->pc_name);
#if defined(CONFIG_SMP) && defined(HAVE_NODE_TO_CPUMASK)
- if (cfs_test_bit(LIOD_BIND, &pc->pc_flags)) {
+ if (test_bit(LIOD_BIND, &pc->pc_flags)) {
int index = pc->pc_index;
if (index >= 0 && index < cfs_num_possible_cpus()) {
*/
rc = lu_context_init(&env.le_ctx,
LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
- cfs_complete(&pc->pc_starting);
+ complete(&pc->pc_starting);
if (rc != 0)
RETURN(rc);
/*
* Abort inflight rpcs for forced stop case.
*/
- if (cfs_test_bit(LIOD_STOP, &pc->pc_flags)) {
- if (cfs_test_bit(LIOD_FORCE, &pc->pc_flags))
+ if (test_bit(LIOD_STOP, &pc->pc_flags)) {
+ if (test_bit(LIOD_FORCE, &pc->pc_flags))
ptlrpc_abort_set(set);
exit++;
}
ptlrpc_set_wait(set);
lu_context_fini(&env.le_ctx);
- cfs_clear_bit(LIOD_START, &pc->pc_flags);
- cfs_clear_bit(LIOD_STOP, &pc->pc_flags);
- cfs_clear_bit(LIOD_FORCE, &pc->pc_flags);
- cfs_clear_bit(LIOD_BIND, &pc->pc_flags);
+ clear_bit(LIOD_START, &pc->pc_flags);
+ clear_bit(LIOD_STOP, &pc->pc_flags);
+ clear_bit(LIOD_FORCE, &pc->pc_flags);
+ clear_bit(LIOD_BIND, &pc->pc_flags);
- cfs_complete(&pc->pc_finishing);
+ complete(&pc->pc_finishing);
return 0;
}
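The LIOD_* thread flags above are manipulated with the native atomic bit operations on an unsigned long word; a minimal sketch follows (not from the patch, DEMO_* names invented).

#include <linux/bitops.h>

#define DEMO_START	0	/* bit numbers within demo_flags */
#define DEMO_STOP	1
#define DEMO_FORCE	2

static unsigned long demo_flags;

static int demo_try_start(void)
{
	/* atomically set DEMO_START; non-zero return means it was already set */
	if (test_and_set_bit(DEMO_START, &demo_flags))
		return -EALREADY;
	return 0;
}

static void demo_stop(int force)
{
	set_bit(DEMO_STOP, &demo_flags);
	if (force)
		set_bit(DEMO_FORCE, &demo_flags);
}

static int demo_should_stop(void)
{
	return test_bit(DEMO_STOP, &demo_flags);
}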
break;
case PDB_POLICY_FULL:
pc->pc_npartners = 0;
- cfs_set_bit(LIOD_BIND, &pc->pc_flags);
+ set_bit(LIOD_BIND, &pc->pc_flags);
break;
case PDB_POLICY_PAIR:
LASSERT(max % 2 == 0);
for (i = max; i < cfs_num_online_cpus(); i++)
cpu_clear(i, mask);
pc->pc_npartners = cpus_weight(mask) - 1;
- cfs_set_bit(LIOD_BIND, &pc->pc_flags);
+ set_bit(LIOD_BIND, &pc->pc_flags);
#else
LASSERT(max >= 3);
pc->pc_npartners = 2;
switch (ptlrpcd_bind_policy) {
case PDB_POLICY_PAIR:
if (index & 0x1) {
- cfs_set_bit(LIOD_BIND, &pc->pc_flags);
+ set_bit(LIOD_BIND, &pc->pc_flags);
pc->pc_partners[0] = &ptlrpcds->
pd_threads[index - 1];
ptlrpcds->pd_threads[index - 1].
pc->pc_npartners = pidx;
#else
if (index & 0x1)
- cfs_set_bit(LIOD_BIND, &pc->pc_flags);
+ set_bit(LIOD_BIND, &pc->pc_flags);
if (index > 0) {
pc->pc_partners[0] = &ptlrpcds->
pd_threads[index - 1];
/*
* XXX: send replay requests.
*/
- if (cfs_test_bit(LIOD_RECOVERY, &pc->pc_flags))
+ if (test_bit(LIOD_RECOVERY, &pc->pc_flags))
rc = ptlrpcd_check(&pc->pc_env, pc);
lu_context_exit(&pc->pc_env.le_ctx);
}
/*
* Do not allow start second thread for one pc.
*/
- if (cfs_test_and_set_bit(LIOD_START, &pc->pc_flags)) {
- CWARN("Starting second thread (%s) for same pc %p\n",
- name, pc);
- RETURN(0);
- }
-
- pc->pc_index = index;
- cfs_init_completion(&pc->pc_starting);
- cfs_init_completion(&pc->pc_finishing);
- cfs_spin_lock_init(&pc->pc_lock);
+ if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
+ CWARN("Starting second thread (%s) for same pc %p\n",
+ name, pc);
+ RETURN(0);
+ }
+
+ pc->pc_index = index;
+ init_completion(&pc->pc_starting);
+ init_completion(&pc->pc_finishing);
+ spin_lock_init(&pc->pc_lock);
strncpy(pc->pc_name, name, sizeof(pc->pc_name) - 1);
pc->pc_set = ptlrpc_prep_set();
if (pc->pc_set == NULL)
GOTO(out, rc);
rc = 0;
- cfs_wait_for_completion(&pc->pc_starting);
+ wait_for_completion(&pc->pc_starting);
#else
pc->pc_wait_callback =
liblustre_register_wait_callback("ptlrpcd_check_async_rpcs",
if (pc->pc_set != NULL) {
struct ptlrpc_request_set *set = pc->pc_set;
- cfs_spin_lock(&pc->pc_lock);
- pc->pc_set = NULL;
- cfs_spin_unlock(&pc->pc_lock);
- ptlrpc_set_destroy(set);
- }
- if (env != 0)
- lu_context_fini(&pc->pc_env.le_ctx);
- cfs_clear_bit(LIOD_BIND, &pc->pc_flags);
+ spin_lock(&pc->pc_lock);
+ pc->pc_set = NULL;
+ spin_unlock(&pc->pc_lock);
+ ptlrpc_set_destroy(set);
+ }
+ if (env != 0)
+ lu_context_fini(&pc->pc_env.le_ctx);
+ clear_bit(LIOD_BIND, &pc->pc_flags);
#else
- SET_BUT_UNUSED(env);
+ SET_BUT_UNUSED(env);
#endif
- cfs_clear_bit(LIOD_START, &pc->pc_flags);
- }
- RETURN(rc);
+ clear_bit(LIOD_START, &pc->pc_flags);
+ }
+ RETURN(rc);
}
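The thread start/stop handshake above relies on the native completion API; a sketch of that handshake is given below. It is illustrative only: the demo_* names are invented and the kthread usage is an assumption, not how ptlrpcd actually spawns its threads.

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kthread.h>

struct demo_ctl {
	struct completion	starting;
	struct completion	finishing;
};

static int demo_thread(void *arg)
{
	struct demo_ctl *ctl = arg;

	complete(&ctl->starting);	/* tell the starter we are up */
	/* ... main loop ... */
	complete(&ctl->finishing);	/* tell the stopper we are done */
	return 0;
}

static int demo_start(struct demo_ctl *ctl)
{
	struct task_struct *task;

	init_completion(&ctl->starting);
	init_completion(&ctl->finishing);
	task = kthread_run(demo_thread, ctl, "demo_thread");
	if (IS_ERR(task))
		return PTR_ERR(task);
	wait_for_completion(&ctl->starting);	/* block until the thread is running */
	return 0;
}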
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
struct ptlrpc_request_set *set = pc->pc_set;
ENTRY;
- if (!cfs_test_bit(LIOD_START, &pc->pc_flags)) {
+ if (!test_bit(LIOD_START, &pc->pc_flags)) {
CWARN("Thread for pc %p was not started\n", pc);
goto out;
}
- cfs_set_bit(LIOD_STOP, &pc->pc_flags);
- if (force)
- cfs_set_bit(LIOD_FORCE, &pc->pc_flags);
- cfs_waitq_signal(&pc->pc_set->set_waitq);
+ set_bit(LIOD_STOP, &pc->pc_flags);
+ if (force)
+ set_bit(LIOD_FORCE, &pc->pc_flags);
+ cfs_waitq_signal(&pc->pc_set->set_waitq);
#ifdef __KERNEL__
- cfs_wait_for_completion(&pc->pc_finishing);
+ wait_for_completion(&pc->pc_finishing);
#else
- liblustre_deregister_wait_callback(pc->pc_wait_callback);
- liblustre_deregister_idle_callback(pc->pc_idle_callback);
+ liblustre_deregister_wait_callback(pc->pc_wait_callback);
+ liblustre_deregister_idle_callback(pc->pc_idle_callback);
#endif
- lu_context_fini(&pc->pc_env.le_ctx);
+ lu_context_fini(&pc->pc_env.le_ctx);
- cfs_spin_lock(&pc->pc_lock);
- pc->pc_set = NULL;
- cfs_spin_unlock(&pc->pc_lock);
- ptlrpc_set_destroy(set);
+ spin_lock(&pc->pc_lock);
+ pc->pc_set = NULL;
+ spin_unlock(&pc->pc_lock);
+ ptlrpc_set_destroy(set);
out:
#ifdef __KERNEL__
GOTO(out, rc = -ENOMEM);
snprintf(name, 15, "ptlrpcd_rcv");
- cfs_set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags);
+ set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags);
rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv);
if (rc < 0)
GOTO(out, rc);
int rc = 0;
ENTRY;
- cfs_mutex_lock(&ptlrpcd_mutex);
+ mutex_lock(&ptlrpcd_mutex);
if (++ptlrpcd_users == 1)
rc = ptlrpcd_init();
- cfs_mutex_unlock(&ptlrpcd_mutex);
+ mutex_unlock(&ptlrpcd_mutex);
RETURN(rc);
}
EXPORT_SYMBOL(ptlrpcd_addref);
void ptlrpcd_decref(void)
{
- cfs_mutex_lock(&ptlrpcd_mutex);
+ mutex_lock(&ptlrpcd_mutex);
if (--ptlrpcd_users == 0)
ptlrpcd_fini();
- cfs_mutex_unlock(&ptlrpcd_mutex);
+ mutex_unlock(&ptlrpcd_mutex);
}
EXPORT_SYMBOL(ptlrpcd_decref);
/** @} ptlrpcd */
llcd->llcd_cookiebytes = 0;
llcd->llcd_size = size;
- cfs_spin_lock(&lcm->lcm_lock);
- llcd->llcd_lcm = lcm;
- cfs_atomic_inc(&lcm->lcm_count);
- cfs_list_add_tail(&llcd->llcd_list, &lcm->lcm_llcds);
- cfs_spin_unlock(&lcm->lcm_lock);
+ spin_lock(&lcm->lcm_lock);
+ llcd->llcd_lcm = lcm;
+ cfs_atomic_inc(&lcm->lcm_count);
+ cfs_list_add_tail(&llcd->llcd_list, &lcm->lcm_llcds);
+ spin_unlock(&lcm->lcm_lock);
cfs_atomic_inc(&llcd_count);
CDEBUG(D_RPCTRACE, "Alloc llcd %p on lcm %p (%d)\n",
llcd_print(llcd, __FUNCTION__, __LINE__);
LBUG();
}
- cfs_spin_lock(&lcm->lcm_lock);
- LASSERT(!cfs_list_empty(&llcd->llcd_list));
- cfs_list_del_init(&llcd->llcd_list);
- cfs_atomic_dec(&lcm->lcm_count);
- cfs_spin_unlock(&lcm->lcm_lock);
+ spin_lock(&lcm->lcm_lock);
+ LASSERT(!cfs_list_empty(&llcd->llcd_list));
+ cfs_list_del_init(&llcd->llcd_list);
+ cfs_atomic_dec(&lcm->lcm_count);
+ spin_unlock(&lcm->lcm_lock);
CDEBUG(D_RPCTRACE, "Free llcd %p on lcm %p (%d)\n",
llcd, lcm, cfs_atomic_read(&lcm->lcm_count));
* Check if we're in exit stage. Do not send llcd in
* this case.
*/
- if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
+ if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
GOTO(exit, rc = -ENODEV);
CDEBUG(D_RPCTRACE, "Sending llcd %p\n", llcd);
* Let all know that we're stopping. This will also make
* llcd_send() refuse any new llcds.
*/
- cfs_set_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags);
+ set_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags);
/*
* Stop processing thread. No new rpcs will be accepted for
CERROR("Busy llcds found (%d) on lcm %p\n",
cfs_atomic_read(&lcm->lcm_count), lcm);
- cfs_spin_lock(&lcm->lcm_lock);
- cfs_list_for_each(tmp, &lcm->lcm_llcds) {
- llcd = cfs_list_entry(tmp, struct llog_canceld_ctxt,
- llcd_list);
- llcd_print(llcd, __FUNCTION__, __LINE__);
- }
- cfs_spin_unlock(&lcm->lcm_lock);
+ spin_lock(&lcm->lcm_lock);
+ cfs_list_for_each(tmp, &lcm->lcm_llcds) {
+ llcd = cfs_list_entry(tmp, struct llog_canceld_ctxt,
+ llcd_list);
+ llcd_print(llcd, __func__, __LINE__);
+ }
+ spin_unlock(&lcm->lcm_lock);
/*
* No point to go further with busy llcds at this point
cfs_atomic_set(&lcm->lcm_count, 0);
cfs_atomic_set(&lcm->lcm_refcount, 1);
- cfs_spin_lock_init(&lcm->lcm_lock);
+ spin_lock_init(&lcm->lcm_lock);
CFS_INIT_LIST_HEAD(&lcm->lcm_llcds);
rc = llog_recov_thread_start(lcm);
if (rc) {
/*
* Start recovery in separate thread.
*/
- cfs_mutex_lock(&ctxt->loc_mutex);
+ mutex_lock(&ctxt->loc_mutex);
ctxt->loc_gen = *gen;
rc = llog_recov_thread_replay(ctxt, ctxt->llog_proc_cb, logid);
- cfs_mutex_unlock(&ctxt->loc_mutex);
+ mutex_unlock(&ctxt->loc_mutex);
RETURN(rc);
}
LASSERT(ctxt != NULL);
- cfs_mutex_lock(&ctxt->loc_mutex);
+ mutex_lock(&ctxt->loc_mutex);
if (!ctxt->loc_lcm) {
CDEBUG(D_RPCTRACE, "No lcm for ctxt %p\n", ctxt);
GOTO(out, rc = -ENODEV);
GOTO(out, rc = -ENODEV);
}
- if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags)) {
+ if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags)) {
CDEBUG(D_RPCTRACE, "Commit thread is stopping for ctxt %p\n",
ctxt);
GOTO(out, rc = -ENODEV);
* Allocation is successful, let's check for stop
* flag again to fall back as soon as possible.
*/
- if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
+ if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
GOTO(out, rc = -ENODEV);
}
* Allocation is successful, let's check for stop
* flag again to fall back as soon as possible.
*/
- if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
+ if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
GOTO(out, rc = -ENODEV);
}
if (flags & OBD_LLOG_FL_EXIT)
ctxt->loc_flags = LLOG_CTXT_FLAG_STOP;
- cfs_mutex_unlock(&ctxt->loc_mutex);
+ mutex_unlock(&ctxt->loc_mutex);
return rc;
}
EXPORT_SYMBOL(llog_obd_repl_cancel);
/*
* Flush any remaining llcd.
*/
- cfs_mutex_lock(&ctxt->loc_mutex);
+ mutex_lock(&ctxt->loc_mutex);
if (exp && (ctxt->loc_imp == exp->exp_imp_reverse)) {
/*
* This is ost->mds connection, we can't be sure that mds
if (flags & OBD_LLOG_FL_EXIT)
ctxt->loc_flags = LLOG_CTXT_FLAG_STOP;
- cfs_mutex_unlock(&ctxt->loc_mutex);
+ mutex_unlock(&ctxt->loc_mutex);
} else {
/*
* This is either llog_sync() from generic llog code or sync
* llcds to the target with waiting for completion.
*/
CDEBUG(D_RPCTRACE, "Sync cached llcd\n");
- cfs_mutex_unlock(&ctxt->loc_mutex);
+ mutex_unlock(&ctxt->loc_mutex);
rc = llog_cancel(NULL, ctxt, NULL, 0, NULL,
OBD_LLOG_FL_SENDNOW | flags);
}
/* It might have committed some after we last spoke, so make sure we
* get rid of them now.
*/
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_last_transno_checked = 0;
- ptlrpc_free_committed(imp);
- last_transno = imp->imp_last_replay_transno;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ imp->imp_last_transno_checked = 0;
+ ptlrpc_free_committed(imp);
+ last_transno = imp->imp_last_replay_transno;
+ spin_unlock(&imp->imp_lock);
CDEBUG(D_HA, "import %p from %s committed "LPU64" last "LPU64"\n",
imp, obd2cli_tgt(imp->imp_obd),
req = NULL;
}
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_resend_replay = 0;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ imp->imp_resend_replay = 0;
+ spin_unlock(&imp->imp_lock);
if (req != NULL) {
rc = ptlrpc_replay_req(req);
*/
/* Well... what if lctl recover is called twice at the same time?
*/
- cfs_spin_lock(&imp->imp_lock);
- if (imp->imp_state != LUSTRE_IMP_RECOVER) {
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state != LUSTRE_IMP_RECOVER) {
+ spin_unlock(&imp->imp_lock);
RETURN(-1);
}
if (!ptlrpc_no_resend(req))
ptlrpc_resend_req(req);
}
- cfs_spin_unlock(&imp->imp_lock);
+ spin_unlock(&imp->imp_lock);
- RETURN(0);
+ RETURN(0);
}
EXPORT_SYMBOL(ptlrpc_resend);
*/
void ptlrpc_wake_delayed(struct obd_import *imp)
{
- cfs_list_t *tmp, *pos;
- struct ptlrpc_request *req;
+ cfs_list_t *tmp, *pos;
+ struct ptlrpc_request *req;
- cfs_spin_lock(&imp->imp_lock);
- cfs_list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
- req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
+ spin_lock(&imp->imp_lock);
+ cfs_list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
+ req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
- DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set);
- ptlrpc_client_wake_req(req);
- }
- cfs_spin_unlock(&imp->imp_lock);
+ DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set);
+ ptlrpc_client_wake_req(req);
+ }
+ spin_unlock(&imp->imp_lock);
}
EXPORT_SYMBOL(ptlrpc_wake_delayed);
/* Wait for recovery to complete and resend. If evicted, then
this request will be errored out later.*/
- cfs_spin_lock(&failed_req->rq_lock);
- if (!failed_req->rq_no_resend)
- failed_req->rq_resend = 1;
- cfs_spin_unlock(&failed_req->rq_lock);
+ spin_lock(&failed_req->rq_lock);
+ if (!failed_req->rq_no_resend)
+ failed_req->rq_resend = 1;
+ spin_unlock(&failed_req->rq_lock);
- EXIT;
+ EXIT;
}
/**
/* set before invalidate to avoid messages about imp_inval
* set without imp_deactive in ptlrpc_import_delay_req */
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_deactive = 1;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ imp->imp_deactive = 1;
+ spin_unlock(&imp->imp_lock);
obd_import_event(imp->imp_obd, imp, IMP_EVENT_DEACTIVATE);
CDEBUG(D_HA, "setting import %s VALID\n",
obd2cli_tgt(imp->imp_obd));
- cfs_spin_lock(&imp->imp_lock);
- imp->imp_deactive = 0;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ imp->imp_deactive = 0;
+ spin_unlock(&imp->imp_lock);
obd_import_event(imp->imp_obd, imp, IMP_EVENT_ACTIVATE);
rc = ptlrpc_recover_import(imp, NULL, 0);
/* Attempt to reconnect an import */
int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async)
{
- int rc = 0;
- ENTRY;
-
- cfs_spin_lock(&imp->imp_lock);
- if (imp->imp_state == LUSTRE_IMP_NEW || imp->imp_deactive ||
- cfs_atomic_read(&imp->imp_inval_count))
- rc = -EINVAL;
- cfs_spin_unlock(&imp->imp_lock);
+ int rc = 0;
+ ENTRY;
+
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state == LUSTRE_IMP_NEW || imp->imp_deactive ||
+ cfs_atomic_read(&imp->imp_inval_count))
+ rc = -EINVAL;
+ spin_unlock(&imp->imp_lock);
if (rc)
GOTO(out, rc);
}
/* Check if reconnect is already in progress */
- cfs_spin_lock(&imp->imp_lock);
- if (imp->imp_state != LUSTRE_IMP_DISCON) {
- imp->imp_force_verify = 1;
- rc = -EALREADY;
- }
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state != LUSTRE_IMP_DISCON) {
+ imp->imp_force_verify = 1;
+ rc = -EALREADY;
+ }
+ spin_unlock(&imp->imp_lock);
if (rc)
GOTO(out, rc);
int ptlrpc_import_in_recovery(struct obd_import *imp)
{
- int in_recovery = 1;
- cfs_spin_lock(&imp->imp_lock);
- if (imp->imp_state == LUSTRE_IMP_FULL ||
- imp->imp_state == LUSTRE_IMP_CLOSED ||
- imp->imp_state == LUSTRE_IMP_DISCON)
- in_recovery = 0;
- cfs_spin_unlock(&imp->imp_lock);
- return in_recovery;
+ int in_recovery = 1;
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_state == LUSTRE_IMP_FULL ||
+ imp->imp_state == LUSTRE_IMP_CLOSED ||
+ imp->imp_state == LUSTRE_IMP_DISCON)
+ in_recovery = 0;
+ spin_unlock(&imp->imp_lock);
+ return in_recovery;
}
* policy registers *
***********************************************/
-static cfs_rwlock_t policy_lock;
+static rwlock_t policy_lock;
static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
NULL,
};
if (number >= SPTLRPC_POLICY_MAX)
return -EINVAL;
- cfs_write_lock(&policy_lock);
+ write_lock(&policy_lock);
if (unlikely(policies[number])) {
- cfs_write_unlock(&policy_lock);
+ write_unlock(&policy_lock);
return -EALREADY;
}
policies[number] = policy;
- cfs_write_unlock(&policy_lock);
+ write_unlock(&policy_lock);
CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
return 0;
LASSERT(number < SPTLRPC_POLICY_MAX);
- cfs_write_lock(&policy_lock);
+ write_lock(&policy_lock);
if (unlikely(policies[number] == NULL)) {
- cfs_write_unlock(&policy_lock);
+ write_unlock(&policy_lock);
CERROR("%s: already unregistered\n", policy->sp_name);
return -EINVAL;
}
LASSERT(policies[number] == policy);
policies[number] = NULL;
- cfs_write_unlock(&policy_lock);
+ write_unlock(&policy_lock);
CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
return 0;
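The policy table above is now guarded by a plain kernel rwlock; a minimal sketch of the read/write locking pattern follows (not part of the patch, demo_* names invented).

#include <linux/spinlock.h>	/* rwlock_t and the read/write lock helpers */

#define DEMO_POLICY_MAX	8

static DEFINE_RWLOCK(demo_policy_lock);
static void *demo_policies[DEMO_POLICY_MAX];

static int demo_register(int number, void *policy)
{
	if (number < 0 || number >= DEMO_POLICY_MAX)
		return -EINVAL;

	write_lock(&demo_policy_lock);		/* exclusive: updates the table */
	if (demo_policies[number]) {
		write_unlock(&demo_policy_lock);
		return -EALREADY;
	}
	demo_policies[number] = policy;
	write_unlock(&demo_policy_lock);
	return 0;
}

static void *demo_lookup(int number)
{
	void *policy;

	read_lock(&demo_policy_lock);		/* shared: concurrent readers allowed */
	policy = demo_policies[number];
	read_unlock(&demo_policy_lock);
	return policy;
}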
static
struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
{
- static CFS_DEFINE_MUTEX(load_mutex);
+ static DEFINE_MUTEX(load_mutex);
static cfs_atomic_t loaded = CFS_ATOMIC_INIT(0);
struct ptlrpc_sec_policy *policy;
__u16 number = SPTLRPC_FLVR_POLICY(flavor);
return NULL;
while (1) {
- cfs_read_lock(&policy_lock);
+ read_lock(&policy_lock);
policy = policies[number];
if (policy && !cfs_try_module_get(policy->sp_owner))
policy = NULL;
if (policy == NULL)
flag = cfs_atomic_read(&loaded);
- cfs_read_unlock(&policy_lock);
+ read_unlock(&policy_lock);
if (policy != NULL || flag != 0 ||
number != SPTLRPC_POLICY_GSS)
break;
/* try to load gss module, once */
- cfs_mutex_lock(&load_mutex);
+ mutex_lock(&load_mutex);
if (cfs_atomic_read(&loaded) == 0) {
if (cfs_request_module("ptlrpc_gss") == 0)
CDEBUG(D_SEC,
cfs_atomic_set(&loaded, 1);
}
- cfs_mutex_unlock(&load_mutex);
+ mutex_unlock(&load_mutex);
}
return policy;
*/
void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
{
- struct ptlrpc_request *req, *next;
+ struct ptlrpc_request *req, *next;
- cfs_spin_lock(&ctx->cc_lock);
- cfs_list_for_each_entry_safe(req, next, &ctx->cc_req_list,
- rq_ctx_chain) {
- cfs_list_del_init(&req->rq_ctx_chain);
- ptlrpc_client_wake_req(req);
- }
- cfs_spin_unlock(&ctx->cc_lock);
+ spin_lock(&ctx->cc_lock);
+ cfs_list_for_each_entry_safe(req, next, &ctx->cc_req_list,
+ rq_ctx_chain) {
+ cfs_list_del_init(&req->rq_ctx_chain);
+ ptlrpc_client_wake_req(req);
+ }
+ spin_unlock(&ctx->cc_lock);
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
static int import_sec_check_expire(struct obd_import *imp)
{
- int adapt = 0;
+ int adapt = 0;
- cfs_spin_lock(&imp->imp_lock);
- if (imp->imp_sec_expire &&
- imp->imp_sec_expire < cfs_time_current_sec()) {
- adapt = 1;
- imp->imp_sec_expire = 0;
- }
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_sec_expire &&
+ imp->imp_sec_expire < cfs_time_current_sec()) {
+ adapt = 1;
+ imp->imp_sec_expire = 0;
+ }
+ spin_unlock(&imp->imp_lock);
if (!adapt)
return 0;
* in the context waiting list.
*/
if (!cfs_list_empty(&req->rq_ctx_chain)) {
- cfs_spin_lock(&req->rq_cli_ctx->cc_lock);
- cfs_list_del_init(&req->rq_ctx_chain);
- cfs_spin_unlock(&req->rq_cli_ctx->cc_lock);
+ spin_lock(&req->rq_cli_ctx->cc_lock);
+ cfs_list_del_init(&req->rq_ctx_chain);
+ spin_unlock(&req->rq_cli_ctx->cc_lock);
}
sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
LASSERT(newctx);
if (unlikely(newctx == oldctx &&
- cfs_test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
+ test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
/*
* still get the old dead ctx, usually means system too busy
*/
static
void ctx_refresh_interrupt(void *data)
{
- struct ptlrpc_request *req = data;
+ struct ptlrpc_request *req = data;
- cfs_spin_lock(&req->rq_lock);
- req->rq_intr = 1;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
+ req->rq_intr = 1;
+ spin_unlock(&req->rq_lock);
}
static
void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
{
- cfs_spin_lock(&ctx->cc_lock);
- if (!cfs_list_empty(&req->rq_ctx_chain))
- cfs_list_del_init(&req->rq_ctx_chain);
- cfs_spin_unlock(&ctx->cc_lock);
+ spin_lock(&ctx->cc_lock);
+ if (!cfs_list_empty(&req->rq_ctx_chain))
+ cfs_list_del_init(&req->rq_ctx_chain);
+ spin_unlock(&ctx->cc_lock);
}
/**
if (cli_ctx_is_eternal(ctx))
RETURN(0);
- if (unlikely(cfs_test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
+ if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
LASSERT(ctx->cc_ops->refresh);
ctx->cc_ops->refresh(ctx);
}
- LASSERT(cfs_test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
+ LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
LASSERT(ctx->cc_ops->validate);
if (ctx->cc_ops->validate(ctx) == 0) {
RETURN(0);
}
- if (unlikely(cfs_test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
- cfs_spin_lock(&req->rq_lock);
- req->rq_err = 1;
- cfs_spin_unlock(&req->rq_lock);
+ if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
+ spin_lock(&req->rq_lock);
+ req->rq_err = 1;
+ spin_unlock(&req->rq_lock);
req_off_ctx_list(req, ctx);
RETURN(-EPERM);
}
* 2. Current context never be refreshed, then we are fine: we
* never really send request with old context before.
*/
- if (cfs_test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
- unlikely(req->rq_reqmsg) &&
- lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
- req_off_ctx_list(req, ctx);
- RETURN(0);
- }
-
- if (unlikely(cfs_test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
- req_off_ctx_list(req, ctx);
- /*
- * don't switch ctx if import was deactivated
- */
- if (req->rq_import->imp_deactive) {
- cfs_spin_lock(&req->rq_lock);
- req->rq_err = 1;
- cfs_spin_unlock(&req->rq_lock);
- RETURN(-EINTR);
- }
-
- rc = sptlrpc_req_replace_dead_ctx(req);
- if (rc) {
- LASSERT(ctx == req->rq_cli_ctx);
- CERROR("req %p: failed to replace dead ctx %p: %d\n",
- req, ctx, rc);
- cfs_spin_lock(&req->rq_lock);
- req->rq_err = 1;
- cfs_spin_unlock(&req->rq_lock);
+ if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
+ unlikely(req->rq_reqmsg) &&
+ lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
+ req_off_ctx_list(req, ctx);
+ RETURN(0);
+ }
+
+ if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
+ req_off_ctx_list(req, ctx);
+ /*
+ * don't switch ctx if import was deactivated
+ */
+ if (req->rq_import->imp_deactive) {
+ spin_lock(&req->rq_lock);
+ req->rq_err = 1;
+ spin_unlock(&req->rq_lock);
+ RETURN(-EINTR);
+ }
+
+ rc = sptlrpc_req_replace_dead_ctx(req);
+ if (rc) {
+ LASSERT(ctx == req->rq_cli_ctx);
+ CERROR("req %p: failed to replace dead ctx %p: %d\n",
+ req, ctx, rc);
+ spin_lock(&req->rq_lock);
+ req->rq_err = 1;
+ spin_unlock(&req->rq_lock);
RETURN(rc);
}
* Now we're sure this context is during upcall, add myself into
* waiting list
*/
- cfs_spin_lock(&ctx->cc_lock);
- if (cfs_list_empty(&req->rq_ctx_chain))
- cfs_list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
- cfs_spin_unlock(&ctx->cc_lock);
-
- if (timeout < 0)
- RETURN(-EWOULDBLOCK);
-
- /* Clear any flags that may be present from previous sends */
- LASSERT(req->rq_receiving_reply == 0);
- cfs_spin_lock(&req->rq_lock);
- req->rq_err = 0;
- req->rq_timedout = 0;
- req->rq_resend = 0;
- req->rq_restart = 0;
- cfs_spin_unlock(&req->rq_lock);
+ spin_lock(&ctx->cc_lock);
+ if (cfs_list_empty(&req->rq_ctx_chain))
+ cfs_list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
+ spin_unlock(&ctx->cc_lock);
+
+ if (timeout < 0)
+ RETURN(-EWOULDBLOCK);
+
+ /* Clear any flags that may be present from previous sends */
+ LASSERT(req->rq_receiving_reply == 0);
+ spin_lock(&req->rq_lock);
+ req->rq_err = 0;
+ req->rq_timedout = 0;
+ req->rq_resend = 0;
+ req->rq_restart = 0;
+ spin_unlock(&req->rq_lock);
lwi = LWI_TIMEOUT_INTR(timeout * CFS_HZ, ctx_refresh_timeout,
ctx_refresh_interrupt, req);
sec = req->rq_cli_ctx->cc_sec;
- cfs_spin_lock(&sec->ps_lock);
- req->rq_flvr = sec->ps_flvr;
- cfs_spin_unlock(&sec->ps_lock);
+ spin_lock(&sec->ps_lock);
+ req->rq_flvr = sec->ps_flvr;
+ spin_unlock(&sec->ps_lock);
/* force SVC_NULL for context initiation rpc, SVC_INTG for context
* destruction rpc */
if (!req)
RETURN(-ENOMEM);
- cfs_spin_lock_init(&req->rq_lock);
+ spin_lock_init(&req->rq_lock);
cfs_atomic_set(&req->rq_refcount, 10000);
CFS_INIT_LIST_HEAD(&req->rq_ctx_chain);
cfs_waitq_init(&req->rq_reply_waitq);
GOTO(err_req, rc = -ENOMEM);
/* sanity checkings and copy data out, do it inside spinlock */
- cfs_spin_lock(&req->rq_lock);
-
- if (req->rq_replied) {
- cfs_spin_unlock(&req->rq_lock);
- GOTO(err_buf, rc = -EALREADY);
- }
-
- LASSERT(req->rq_repbuf);
- LASSERT(req->rq_repdata == NULL);
- LASSERT(req->rq_repmsg == NULL);
-
- if (req->rq_reply_off != 0) {
- CERROR("early reply with offset %u\n", req->rq_reply_off);
- cfs_spin_unlock(&req->rq_lock);
- GOTO(err_buf, rc = -EPROTO);
- }
-
- if (req->rq_nob_received != early_size) {
- /* even another early arrived the size should be the same */
- CERROR("data size has changed from %u to %u\n",
- early_size, req->rq_nob_received);
- cfs_spin_unlock(&req->rq_lock);
- GOTO(err_buf, rc = -EINVAL);
- }
-
- if (req->rq_nob_received < sizeof(struct lustre_msg)) {
- CERROR("early reply length %d too small\n",
- req->rq_nob_received);
- cfs_spin_unlock(&req->rq_lock);
- GOTO(err_buf, rc = -EALREADY);
- }
-
- memcpy(early_buf, req->rq_repbuf, early_size);
- cfs_spin_unlock(&req->rq_lock);
-
- cfs_spin_lock_init(&early_req->rq_lock);
+ spin_lock(&req->rq_lock);
+
+ if (req->rq_replied) {
+ spin_unlock(&req->rq_lock);
+ GOTO(err_buf, rc = -EALREADY);
+ }
+
+ LASSERT(req->rq_repbuf);
+ LASSERT(req->rq_repdata == NULL);
+ LASSERT(req->rq_repmsg == NULL);
+
+ if (req->rq_reply_off != 0) {
+ CERROR("early reply with offset %u\n", req->rq_reply_off);
+ spin_unlock(&req->rq_lock);
+ GOTO(err_buf, rc = -EPROTO);
+ }
+
+ if (req->rq_nob_received != early_size) {
+ /* even if another early reply arrived, the size should be the same */
+ CERROR("data size has changed from %u to %u\n",
+ early_size, req->rq_nob_received);
+ spin_unlock(&req->rq_lock);
+ GOTO(err_buf, rc = -EINVAL);
+ }
+
+ if (req->rq_nob_received < sizeof(struct lustre_msg)) {
+ CERROR("early reply length %d too small\n",
+ req->rq_nob_received);
+ spin_unlock(&req->rq_lock);
+ GOTO(err_buf, rc = -EALREADY);
+ }
+
+ memcpy(early_buf, req->rq_repbuf, early_size);
+ spin_unlock(&req->rq_lock);
+
+ spin_lock_init(&early_req->rq_lock);
early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
early_req->rq_flvr = req->rq_flvr;
early_req->rq_repbuf = early_buf;
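
The early-reply hunk above validates the shared reply buffer and copies it while rq_lock is held, releasing the lock on every exit path. A small sketch of that copy-out-under-spinlock shape, with hypothetical names; the real code also checks rq_replied, the reply offset and the minimum message size:

#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/errno.h>

/* take a snapshot of the receive buffer while the owner lock is held;
 * the lock is dropped on both the error and the success path */
static int demo_copy_early_reply(spinlock_t *lock, const void *repbuf,
                                 unsigned int nob_received,
                                 void *early_buf, unsigned int early_size)
{
        spin_lock(lock);
        if (nob_received != early_size) {
                spin_unlock(lock);
                return -EINVAL;
        }
        memcpy(early_buf, repbuf, early_size);
        spin_unlock(lock);
        return 0;
}
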
struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
{
- struct ptlrpc_sec *sec;
+ struct ptlrpc_sec *sec;
- cfs_spin_lock(&imp->imp_lock);
- sec = sptlrpc_sec_get(imp->imp_sec);
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ sec = sptlrpc_sec_get(imp->imp_sec);
+ spin_unlock(&imp->imp_lock);
- return sec;
+ return sec;
}
EXPORT_SYMBOL(sptlrpc_import_sec_ref);
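
sptlrpc_import_sec_ref() above is the usual grab-a-reference-under-a-spinlock idiom, and the install helper just below swaps the pointer under the same lock and releases the old object outside it. A self-contained sketch of that idiom with an illustrative refcounted object, not the real ptlrpc_sec API (imp_lock is assumed initialized elsewhere):

#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/slab.h>

struct demo_sec {
        atomic_t        ds_ref;
};

static struct demo_sec *demo_sec_get(struct demo_sec *sec)
{
        if (sec)
                atomic_inc(&sec->ds_ref);
        return sec;
}

static void demo_sec_put(struct demo_sec *sec)
{
        if (sec && atomic_dec_and_test(&sec->ds_ref))
                kfree(sec);
}

struct demo_import {
        spinlock_t       imp_lock;
        struct demo_sec *imp_sec;
};

/* take a reference while the pointer is stabilized by imp_lock */
static struct demo_sec *demo_import_sec_ref(struct demo_import *imp)
{
        struct demo_sec *sec;

        spin_lock(&imp->imp_lock);
        sec = demo_sec_get(imp->imp_sec);
        spin_unlock(&imp->imp_lock);
        return sec;
}

/* swap in a new object under the lock, drop the old one outside it */
static void demo_import_sec_install(struct demo_import *imp,
                                    struct demo_sec *sec)
{
        struct demo_sec *old;

        spin_lock(&imp->imp_lock);
        old = imp->imp_sec;
        imp->imp_sec = sec;
        spin_unlock(&imp->imp_lock);

        if (old)
                demo_sec_put(old);
}
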
static void sptlrpc_import_sec_install(struct obd_import *imp,
struct ptlrpc_sec *sec)
{
- struct ptlrpc_sec *old_sec;
+ struct ptlrpc_sec *old_sec;
- LASSERT_ATOMIC_POS(&sec->ps_refcount);
+ LASSERT_ATOMIC_POS(&sec->ps_refcount);
- cfs_spin_lock(&imp->imp_lock);
- old_sec = imp->imp_sec;
- imp->imp_sec = sec;
- cfs_spin_unlock(&imp->imp_lock);
+ spin_lock(&imp->imp_lock);
+ old_sec = imp->imp_sec;
+ imp->imp_sec = sec;
+ spin_unlock(&imp->imp_lock);
if (old_sec) {
sptlrpc_sec_kill(old_sec);
sptlrpc_secflags2str(sf->sf_flags,
str2, sizeof(str2)));
- cfs_spin_lock(&sec->ps_lock);
- flavor_copy(&sec->ps_flvr, sf);
- cfs_spin_unlock(&sec->ps_lock);
+ spin_lock(&sec->ps_lock);
+ flavor_copy(&sec->ps_flvr, sf);
+ spin_unlock(&sec->ps_lock);
}
/**
sptlrpc_flavor2name(&sf, str, sizeof(str)));
}
- cfs_mutex_lock(&imp->imp_sec_mutex);
+ mutex_lock(&imp->imp_sec_mutex);
newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
if (newsec) {
rc = -EPERM;
}
- cfs_mutex_unlock(&imp->imp_sec_mutex);
+ mutex_unlock(&imp->imp_sec_mutex);
out:
sptlrpc_sec_put(sec);
RETURN(rc);
if (req->rq_ctx_fini)
return 0;
- cfs_spin_lock(&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
/* if flavor just changed (exp->exp_flvr_changed != 0), we wait for
* the first req with the new flavor, then treat it as current flavor,
!(req->rq_ctx_init &&
(req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
req->rq_auth_usr_ost))) {
- cfs_spin_unlock(&exp->exp_lock);
- CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
- req->rq_auth_gss, req->rq_ctx_init,
- req->rq_auth_usr_root, req->rq_auth_usr_mdt,
- req->rq_auth_usr_ost);
- return 0;
- }
+ spin_unlock(&exp->exp_lock);
+ CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
+ req->rq_auth_gss, req->rq_ctx_init,
+ req->rq_auth_usr_root, req->rq_auth_usr_mdt,
+ req->rq_auth_usr_ost);
+ return 0;
+ }
- exp->exp_flvr_adapt = 0;
- cfs_spin_unlock(&exp->exp_lock);
+ exp->exp_flvr_adapt = 0;
+ spin_unlock(&exp->exp_lock);
return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
req->rq_svc_ctx, &flavor);
if (!req->rq_auth_gss || !req->rq_ctx_init ||
(!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
!req->rq_auth_usr_ost)) {
- cfs_spin_unlock(&exp->exp_lock);
- return 0;
- }
-
- /* if flavor just changed, we should not proceed, just leave
- * it and current flavor will be discovered and replaced
- * shortly, and let _this_ rpc pass through */
- if (exp->exp_flvr_changed) {
- LASSERT(exp->exp_flvr_adapt);
- cfs_spin_unlock(&exp->exp_lock);
- return 0;
- }
-
- if (exp->exp_flvr_adapt) {
- exp->exp_flvr_adapt = 0;
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
- exp, exp->exp_flvr.sf_rpc,
- exp->exp_flvr_old[0].sf_rpc,
- exp->exp_flvr_old[1].sf_rpc);
- flavor = exp->exp_flvr;
- cfs_spin_unlock(&exp->exp_lock);
-
- return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
- req->rq_svc_ctx,
- &flavor);
- } else {
- CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
- "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
- exp->exp_flvr_old[0].sf_rpc,
- exp->exp_flvr_old[1].sf_rpc);
- cfs_spin_unlock(&exp->exp_lock);
+ spin_unlock(&exp->exp_lock);
+ return 0;
+ }
+
+ /* if flavor just changed, we should not proceed, just leave
+ * it and current flavor will be discovered and replaced
+ * shortly, and let _this_ rpc pass through */
+ if (exp->exp_flvr_changed) {
+ LASSERT(exp->exp_flvr_adapt);
+ spin_unlock(&exp->exp_lock);
+ return 0;
+ }
+
+ if (exp->exp_flvr_adapt) {
+ exp->exp_flvr_adapt = 0;
+ CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
+ exp, exp->exp_flvr.sf_rpc,
+ exp->exp_flvr_old[0].sf_rpc,
+ exp->exp_flvr_old[1].sf_rpc);
+ flavor = exp->exp_flvr;
+ spin_unlock(&exp->exp_lock);
+
+ return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
+ req->rq_svc_ctx,
+ &flavor);
+ } else {
+ CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
+ "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
+ exp->exp_flvr_old[0].sf_rpc,
+ exp->exp_flvr_old[1].sf_rpc);
+ spin_unlock(&exp->exp_lock);
return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
req->rq_svc_ctx);
exp->exp_flvr_old[1].sf_rpc,
exp->exp_flvr_expire[0] -
cfs_time_current_sec());
- cfs_spin_unlock(&exp->exp_lock);
+ spin_unlock(&exp->exp_lock);
return 0;
}
} else {
exp->exp_flvr_old[1].sf_rpc,
exp->exp_flvr_expire[1] -
cfs_time_current_sec());
- cfs_spin_unlock(&exp->exp_lock);
+ spin_unlock(&exp->exp_lock);
return 0;
}
} else {
exp->exp_flvr_old[1].sf_rpc);
}
- cfs_spin_unlock(&exp->exp_lock);
+ spin_unlock(&exp->exp_lock);
CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with "
"unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n",
LASSERT(obd);
- cfs_spin_lock(&obd->obd_dev_lock);
+ spin_lock(&obd->obd_dev_lock);
- cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
- if (exp->exp_connection == NULL)
- continue;
+ cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
+ if (exp->exp_connection == NULL)
+ continue;
- /* note if this export had just been updated flavor
- * (exp_flvr_changed == 1), this will override the
- * previous one. */
- cfs_spin_lock(&exp->exp_lock);
+ /* note if this export had just been updated flavor
+ * (exp_flvr_changed == 1), this will override the
+ * previous one. */
+ spin_lock(&exp->exp_lock);
sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
exp->exp_connection->c_peer.nid,
&new_flvr);
exp->exp_flvr.sf_rpc,
exp->exp_flvr_old[1].sf_rpc);
}
- cfs_spin_unlock(&exp->exp_lock);
- }
+ spin_unlock(&exp->exp_lock);
+ }
- cfs_spin_unlock(&obd->obd_dev_lock);
+ spin_unlock(&obd->obd_dev_lock);
}
EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
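
The export walk above nests two spinlocks: obd_dev_lock keeps the export list stable while each export's exp_lock protects its own flavor state. A sketch of that lock ordering with illustrative structures (locks assumed initialized elsewhere):

#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_export {
        struct list_head exp_chain;
        spinlock_t       exp_lock;
        unsigned int     exp_flavor;
};

struct demo_device {
        spinlock_t       obd_dev_lock;  /* outer: protects obd_exports */
        struct list_head obd_exports;
};

static void demo_update_exp_flavor(struct demo_device *obd,
                                   unsigned int flavor)
{
        struct demo_export *exp;

        spin_lock(&obd->obd_dev_lock);
        list_for_each_entry(exp, &obd->obd_exports, exp_chain) {
                spin_lock(&exp->exp_lock);      /* inner: per-export state */
                exp->exp_flavor = flavor;
                spin_unlock(&exp->exp_lock);
        }
        spin_unlock(&obd->obd_dev_lock);
}
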
{
int rc;
- cfs_rwlock_init(&policy_lock);
+ rwlock_init(&policy_lock);
rc = sptlrpc_gc_init();
if (rc)
/*
* in-pool pages bookkeeping
*/
- cfs_spinlock_t epp_lock; /* protect following fields */
+ spinlock_t epp_lock; /* protect following fields */
unsigned long epp_total_pages; /* total pages in pools */
unsigned long epp_free_pages; /* current pages available */
{
int rc;
- cfs_spin_lock(&page_pools.epp_lock);
+ spin_lock(&page_pools.epp_lock);
rc = snprintf(page, count,
"physical pages: %lu\n"
page_pools.epp_st_max_wait, CFS_HZ
);
- cfs_spin_unlock(&page_pools.epp_lock);
- return rc;
+ spin_unlock(&page_pools.epp_lock);
+ return rc;
}
static void enc_pools_release_free_pages(long npages)
*/
static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
- if (unlikely(shrink_param(sc, nr_to_scan) != 0)) {
- cfs_spin_lock(&page_pools.epp_lock);
+ if (unlikely(shrink_param(sc, nr_to_scan) != 0)) {
+ spin_lock(&page_pools.epp_lock);
shrink_param(sc, nr_to_scan) = min_t(unsigned long,
shrink_param(sc, nr_to_scan),
page_pools.epp_free_pages -
page_pools.epp_st_shrinks++;
page_pools.epp_last_shrink = cfs_time_current_sec();
}
- cfs_spin_unlock(&page_pools.epp_lock);
- }
+ spin_unlock(&page_pools.epp_lock);
+ }
- /*
- * if no pool access for a long time, we consider it's fully idle.
- * a little race here is fine.
- */
- if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
- CACHE_QUIESCENT_PERIOD)) {
- cfs_spin_lock(&page_pools.epp_lock);
- page_pools.epp_idle_idx = IDLE_IDX_MAX;
- cfs_spin_unlock(&page_pools.epp_lock);
- }
+ /*
+ * if no pool access for a long time, we consider it fully idle.
+ * a little race here is fine.
+ */
+ if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
+ CACHE_QUIESCENT_PERIOD)) {
+ spin_lock(&page_pools.epp_lock);
+ page_pools.epp_idle_idx = IDLE_IDX_MAX;
+ spin_unlock(&page_pools.epp_lock);
+ }
- LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
- return max((int) page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
- (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
+ LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
+ return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
+ (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
}
static inline
LASSERT(npages_to_npools(npages) == npools);
LASSERT(page_pools.epp_growing);
- cfs_spin_lock(&page_pools.epp_lock);
+ spin_lock(&page_pools.epp_lock);
/*
* (1) fill all the free slots of current pools.
CDEBUG(D_SEC, "add %d pages to total %lu\n", npages,
page_pools.epp_total_pages);
- cfs_spin_unlock(&page_pools.epp_lock);
+ spin_unlock(&page_pools.epp_lock);
}
static int enc_pools_add_pages(int npages)
{
- static CFS_DEFINE_MUTEX(add_pages_mutex);
- cfs_page_t ***pools;
- int npools, alloced = 0;
- int i, j, rc = -ENOMEM;
+ static DEFINE_MUTEX(add_pages_mutex);
+ cfs_page_t ***pools;
+ int npools, alloced = 0;
+ int i, j, rc = -ENOMEM;
- if (npages < PTLRPC_MAX_BRW_PAGES)
- npages = PTLRPC_MAX_BRW_PAGES;
+ if (npages < PTLRPC_MAX_BRW_PAGES)
+ npages = PTLRPC_MAX_BRW_PAGES;
- cfs_mutex_lock(&add_pages_mutex);
+ mutex_lock(&add_pages_mutex);
if (npages + page_pools.epp_total_pages > page_pools.epp_max_pages)
npages = page_pools.epp_max_pages - page_pools.epp_total_pages;
CERROR("Failed to allocate %d enc pages\n", npages);
}
- cfs_mutex_unlock(&add_pages_mutex);
+ mutex_unlock(&add_pages_mutex);
return rc;
}
if (desc->bd_enc_iov == NULL)
return -ENOMEM;
- cfs_spin_lock(&page_pools.epp_lock);
+ spin_lock(&page_pools.epp_lock);
page_pools.epp_st_access++;
again:
if (enc_pools_should_grow(desc->bd_iov_count, now)) {
page_pools.epp_growing = 1;
- cfs_spin_unlock(&page_pools.epp_lock);
- enc_pools_add_pages(page_pools.epp_pages_short / 2);
- cfs_spin_lock(&page_pools.epp_lock);
+ spin_unlock(&page_pools.epp_lock);
+ enc_pools_add_pages(page_pools.epp_pages_short / 2);
+ spin_lock(&page_pools.epp_lock);
page_pools.epp_growing = 0;
cfs_waitlink_init(&waitlink);
cfs_waitq_add(&page_pools.epp_waitq, &waitlink);
- cfs_spin_unlock(&page_pools.epp_lock);
- cfs_waitq_wait(&waitlink, CFS_TASK_UNINT);
- cfs_waitq_del(&page_pools.epp_waitq, &waitlink);
- LASSERT(page_pools.epp_waitqlen > 0);
- cfs_spin_lock(&page_pools.epp_lock);
+ spin_unlock(&page_pools.epp_lock);
+ cfs_waitq_wait(&waitlink, CFS_TASK_UNINT);
+ cfs_waitq_del(&page_pools.epp_waitq, &waitlink);
+ LASSERT(page_pools.epp_waitqlen > 0);
+ spin_lock(&page_pools.epp_lock);
page_pools.epp_waitqlen--;
}
page_pools.epp_last_access = cfs_time_current_sec();
- cfs_spin_unlock(&page_pools.epp_lock);
- return 0;
+ spin_unlock(&page_pools.epp_lock);
+ return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_get_pages);
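
sptlrpc_enc_pool_get_pages() above repeatedly drops epp_lock around the two expensive steps (growing the pool, sleeping on the wait queue) and re-takes it before looking at the counters again. A simplified sketch of that shape, assuming a hypothetical demo_pool with only the fields needed here:

#include <linux/spinlock.h>
#include <linux/wait.h>

struct demo_pool {
        spinlock_t              lock;           /* protects the fields below */
        unsigned long           free_pages;
        int                     growing;
        wait_queue_head_t       waitq;
};

static void demo_pool_init(struct demo_pool *p)
{
        spin_lock_init(&p->lock);
        init_waitqueue_head(&p->waitq);
        p->free_pages = 0;
        p->growing = 0;
}

/* stand-in for the real page allocation, which may sleep */
static void demo_add_pages(struct demo_pool *p, unsigned long n)
{
        spin_lock(&p->lock);
        p->free_pages += n;
        spin_unlock(&p->lock);
        wake_up(&p->waitq);
}

static void demo_get_pages(struct demo_pool *p, unsigned long want)
{
        spin_lock(&p->lock);
        while (p->free_pages < want) {
                if (!p->growing) {
                        p->growing = 1;
                        spin_unlock(&p->lock);  /* never allocate under the lock */
                        demo_add_pages(p, want);
                        spin_lock(&p->lock);
                        p->growing = 0;
                        continue;
                }
                /* someone else is growing the pool: sleep outside the lock */
                spin_unlock(&p->lock);
                wait_event(p->waitq, p->free_pages >= want);
                spin_lock(&p->lock);
        }
        p->free_pages -= want;
        spin_unlock(&p->lock);
}
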
LASSERT(desc->bd_iov_count > 0);
- cfs_spin_lock(&page_pools.epp_lock);
+ spin_lock(&page_pools.epp_lock);
p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
enc_pools_wakeup();
- cfs_spin_unlock(&page_pools.epp_lock);
+ spin_unlock(&page_pools.epp_lock);
- OBD_FREE(desc->bd_enc_iov,
- desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
- desc->bd_enc_iov = NULL;
+ OBD_FREE(desc->bd_enc_iov,
+ desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
+ desc->bd_enc_iov = NULL;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);
*/
int sptlrpc_enc_pool_add_user(void)
{
- int need_grow = 0;
+ int need_grow = 0;
- cfs_spin_lock(&page_pools.epp_lock);
- if (page_pools.epp_growing == 0 && page_pools.epp_total_pages == 0) {
- page_pools.epp_growing = 1;
- need_grow = 1;
- }
- cfs_spin_unlock(&page_pools.epp_lock);
+ spin_lock(&page_pools.epp_lock);
+ if (page_pools.epp_growing == 0 && page_pools.epp_total_pages == 0) {
+ page_pools.epp_growing = 1;
+ need_grow = 1;
+ }
+ spin_unlock(&page_pools.epp_lock);
- if (need_grow) {
- enc_pools_add_pages(PTLRPC_MAX_BRW_PAGES +
- PTLRPC_MAX_BRW_PAGES);
+ if (need_grow) {
+ enc_pools_add_pages(PTLRPC_MAX_BRW_PAGES +
+ PTLRPC_MAX_BRW_PAGES);
- cfs_spin_lock(&page_pools.epp_lock);
- page_pools.epp_growing = 0;
- enc_pools_wakeup();
- cfs_spin_unlock(&page_pools.epp_lock);
- }
- return 0;
+ spin_lock(&page_pools.epp_lock);
+ page_pools.epp_growing = 0;
+ enc_pools_wakeup();
+ spin_unlock(&page_pools.epp_lock);
+ }
+ return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_add_user);
page_pools.epp_last_shrink = cfs_time_current_sec();
page_pools.epp_last_access = cfs_time_current_sec();
- cfs_spin_lock_init(&page_pools.epp_lock);
+ spin_lock_init(&page_pools.epp_lock);
page_pools.epp_total_pages = 0;
page_pools.epp_free_pages = 0;
cfs_list_t sc_tgts; /* target-specific rules */
};
-static cfs_mutex_t sptlrpc_conf_lock;
+static struct mutex sptlrpc_conf_lock;
static CFS_LIST_HEAD(sptlrpc_confs);
static inline int is_hex(char c)
if (conf == NULL) {
target2fsname(target, fsname, sizeof(fsname));
- cfs_mutex_lock(&sptlrpc_conf_lock);
+ mutex_lock(&sptlrpc_conf_lock);
conf = sptlrpc_conf_get(fsname, 0);
if (conf == NULL) {
CERROR("can't find conf\n");
} else {
rc = sptlrpc_conf_merge_rule(conf, target, &rule);
}
- cfs_mutex_unlock(&sptlrpc_conf_lock);
+ mutex_unlock(&sptlrpc_conf_lock);
} else {
- LASSERT(cfs_mutex_is_locked(&sptlrpc_conf_lock));
+ LASSERT(mutex_is_locked(&sptlrpc_conf_lock));
rc = sptlrpc_conf_merge_rule(conf, target, &rule);
}
if (logname2fsname(logname, fsname, sizeof(fsname)))
return;
- cfs_mutex_lock(&sptlrpc_conf_lock);
+ mutex_lock(&sptlrpc_conf_lock);
conf = sptlrpc_conf_get(fsname, 0);
if (conf && conf->sc_local) {
}
conf->sc_modified = 0;
- cfs_mutex_unlock(&sptlrpc_conf_lock);
+ mutex_unlock(&sptlrpc_conf_lock);
}
EXPORT_SYMBOL(sptlrpc_conf_log_update_begin);
if (logname2fsname(logname, fsname, sizeof(fsname)))
return;
- cfs_mutex_lock(&sptlrpc_conf_lock);
+ mutex_lock(&sptlrpc_conf_lock);
conf = sptlrpc_conf_get(fsname, 0);
if (conf) {
conf->sc_updated = 1;
}
- cfs_mutex_unlock(&sptlrpc_conf_lock);
+ mutex_unlock(&sptlrpc_conf_lock);
}
EXPORT_SYMBOL(sptlrpc_conf_log_update_end);
if (logname2fsname(logname, fsname, sizeof(fsname)))
return;
- cfs_mutex_lock(&sptlrpc_conf_lock);
+ mutex_lock(&sptlrpc_conf_lock);
sptlrpc_conf_get(fsname, 1);
- cfs_mutex_unlock(&sptlrpc_conf_lock);
+ mutex_unlock(&sptlrpc_conf_lock);
}
EXPORT_SYMBOL(sptlrpc_conf_log_start);
if (logname2fsname(logname, fsname, sizeof(fsname)))
return;
- cfs_mutex_lock(&sptlrpc_conf_lock);
+ mutex_lock(&sptlrpc_conf_lock);
conf = sptlrpc_conf_get(fsname, 0);
if (conf)
sptlrpc_conf_free(conf);
- cfs_mutex_unlock(&sptlrpc_conf_lock);
+ mutex_unlock(&sptlrpc_conf_lock);
}
EXPORT_SYMBOL(sptlrpc_conf_log_stop);
target2fsname(target->uuid, name, sizeof(name));
- cfs_mutex_lock(&sptlrpc_conf_lock);
+ mutex_lock(&sptlrpc_conf_lock);
conf = sptlrpc_conf_get(name, 0);
if (conf == NULL)
rc = sptlrpc_rule_set_choose(&conf->sc_rset, from, to, nid, sf);
out:
- cfs_mutex_unlock(&sptlrpc_conf_lock);
+ mutex_unlock(&sptlrpc_conf_lock);
if (rc == 0)
get_default_flavor(sf);
CDEBUG(D_SEC, "obd %s\n", obd->u.cli.cl_target_uuid.uuid);
/* serialize with connect/disconnect import */
- cfs_down_read(&obd->u.cli.cl_sem);
-
- imp = obd->u.cli.cl_import;
- if (imp) {
- cfs_spin_lock(&imp->imp_lock);
- if (imp->imp_sec)
- imp->imp_sec_expire = cfs_time_current_sec() +
- SEC_ADAPT_DELAY;
- cfs_spin_unlock(&imp->imp_lock);
- }
+ down_read(&obd->u.cli.cl_sem);
+
+ imp = obd->u.cli.cl_import;
+ if (imp) {
+ spin_lock(&imp->imp_lock);
+ if (imp->imp_sec)
+ imp->imp_sec_expire = cfs_time_current_sec() +
+ SEC_ADAPT_DELAY;
+ spin_unlock(&imp->imp_lock);
+ }
- cfs_up_read(&obd->u.cli.cl_sem);
- EXIT;
+ up_read(&obd->u.cli.cl_sem);
+ EXIT;
}
EXPORT_SYMBOL(sptlrpc_conf_client_adapt);
target2fsname(obd->obd_uuid.uuid, fsname, sizeof(fsname));
- cfs_mutex_lock(&sptlrpc_conf_lock);
+ mutex_lock(&sptlrpc_conf_lock);
conf = sptlrpc_conf_get(fsname, 0);
if (conf == NULL) {
conf_tgt ? &conf_tgt->sct_rset: NULL,
LUSTRE_SP_ANY, sp_dst, rset);
out:
- cfs_mutex_unlock(&sptlrpc_conf_lock);
+ mutex_unlock(&sptlrpc_conf_lock);
RETURN(rc);
}
EXPORT_SYMBOL(sptlrpc_conf_target_get_rules);
int sptlrpc_conf_init(void)
{
- cfs_mutex_init(&sptlrpc_conf_lock);
+ mutex_init(&sptlrpc_conf_lock);
return 0;
}
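
The configuration code ends up with two equivalent ways of getting a native mutex: a static DEFINE_MUTEX() (as used for add_pages_mutex earlier in this patch) or a plain struct mutex set up with mutex_init() from the module setup path, which is what sptlrpc_conf_init() does above. A short sketch of both forms with illustrative names:

#include <linux/init.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_static_lock); /* usable without any init call */
static struct mutex demo_conf_lock;    /* must be set up with mutex_init() */

static int __init demo_conf_init(void)
{
        mutex_init(&demo_conf_lock);
        return 0;
}

static void demo_conf_update(void (*apply)(void))
{
        mutex_lock(&demo_conf_lock);   /* serializes configuration changes */
        apply();
        mutex_unlock(&demo_conf_lock);
}

static void demo_counter_bump(int *counter)
{
        mutex_lock(&demo_static_lock);
        (*counter)++;
        mutex_unlock(&demo_static_lock);
}
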
{
struct sptlrpc_conf *conf, *conf_next;
- cfs_mutex_lock(&sptlrpc_conf_lock);
+ mutex_lock(&sptlrpc_conf_lock);
cfs_list_for_each_entry_safe(conf, conf_next, &sptlrpc_confs, sc_list) {
sptlrpc_conf_free(conf);
}
LASSERT(cfs_list_empty(&sptlrpc_confs));
- cfs_mutex_unlock(&sptlrpc_conf_lock);
+ mutex_unlock(&sptlrpc_conf_lock);
}
#ifdef __KERNEL__
-static cfs_mutex_t sec_gc_mutex;
+static struct mutex sec_gc_mutex;
static CFS_LIST_HEAD(sec_gc_list);
-static cfs_spinlock_t sec_gc_list_lock;
+static spinlock_t sec_gc_list_lock;
static CFS_LIST_HEAD(sec_gc_ctx_list);
-static cfs_spinlock_t sec_gc_ctx_list_lock;
+static spinlock_t sec_gc_ctx_list_lock;
static struct ptlrpc_thread sec_gc_thread;
static cfs_atomic_t sec_gc_wait_del = CFS_ATOMIC_INIT(0);
sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
- cfs_spin_lock(&sec_gc_list_lock);
- cfs_list_add_tail(&sec_gc_list, &sec->ps_gc_list);
- cfs_spin_unlock(&sec_gc_list_lock);
+ spin_lock(&sec_gc_list_lock);
+ cfs_list_add_tail(&sec_gc_list, &sec->ps_gc_list);
+ spin_unlock(&sec_gc_list_lock);
- CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
+ CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
}
EXPORT_SYMBOL(sptlrpc_gc_add_sec);
/* signal before list_del to make iteration in gc thread safe */
cfs_atomic_inc(&sec_gc_wait_del);
- cfs_spin_lock(&sec_gc_list_lock);
- cfs_list_del_init(&sec->ps_gc_list);
- cfs_spin_unlock(&sec_gc_list_lock);
+ spin_lock(&sec_gc_list_lock);
+ cfs_list_del_init(&sec->ps_gc_list);
+ spin_unlock(&sec_gc_list_lock);
- /* barrier */
- cfs_mutex_lock(&sec_gc_mutex);
- cfs_mutex_unlock(&sec_gc_mutex);
+ /* barrier */
+ mutex_lock(&sec_gc_mutex);
+ mutex_unlock(&sec_gc_mutex);
- cfs_atomic_dec(&sec_gc_wait_del);
+ cfs_atomic_dec(&sec_gc_wait_del);
- CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
+ CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
}
EXPORT_SYMBOL(sptlrpc_gc_del_sec);
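
sptlrpc_gc_del_sec() above carries a subtle idiom through the conversion: after unlinking the sec from the GC list it takes and immediately releases sec_gc_mutex, which acts as a barrier guaranteeing that any GC pass started before the unlink has finished with the entry. A minimal sketch of that idiom with illustrative names:

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/atomic.h>

static DEFINE_MUTEX(demo_gc_mutex);            /* held for the whole GC walk */
static DEFINE_SPINLOCK(demo_gc_list_lock);
static LIST_HEAD(demo_gc_list);
static atomic_t demo_gc_wait_del = ATOMIC_INIT(0);

struct demo_sec {
        struct list_head gc_list;
};

static void demo_gc_del_sec(struct demo_sec *sec)
{
        /* signal first so the GC loop bails out quickly */
        atomic_inc(&demo_gc_wait_del);

        spin_lock(&demo_gc_list_lock);
        list_del_init(&sec->gc_list);
        spin_unlock(&demo_gc_list_lock);

        /* barrier: any GC pass holding the mutex is done once we get it */
        mutex_lock(&demo_gc_mutex);
        mutex_unlock(&demo_gc_mutex);

        atomic_dec(&demo_gc_wait_del);
}
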
void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
{
- LASSERT(cfs_list_empty(&ctx->cc_gc_chain));
+ LASSERT(cfs_list_empty(&ctx->cc_gc_chain));
- CDEBUG(D_SEC, "hand over ctx %p(%u->%s)\n",
- ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
- cfs_spin_lock(&sec_gc_ctx_list_lock);
- cfs_list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
- cfs_spin_unlock(&sec_gc_ctx_list_lock);
+ CDEBUG(D_SEC, "hand over ctx %p(%u->%s)\n",
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+ spin_lock(&sec_gc_ctx_list_lock);
+ cfs_list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
+ spin_unlock(&sec_gc_ctx_list_lock);
- thread_add_flags(&sec_gc_thread, SVC_SIGNAL);
- cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
+ thread_add_flags(&sec_gc_thread, SVC_SIGNAL);
+ cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
}
EXPORT_SYMBOL(sptlrpc_gc_add_ctx);
static void sec_process_ctx_list(void)
{
- struct ptlrpc_cli_ctx *ctx;
+ struct ptlrpc_cli_ctx *ctx;
- cfs_spin_lock(&sec_gc_ctx_list_lock);
+ spin_lock(&sec_gc_ctx_list_lock);
- while (!cfs_list_empty(&sec_gc_ctx_list)) {
- ctx = cfs_list_entry(sec_gc_ctx_list.next,
- struct ptlrpc_cli_ctx, cc_gc_chain);
- cfs_list_del_init(&ctx->cc_gc_chain);
- cfs_spin_unlock(&sec_gc_ctx_list_lock);
+ while (!cfs_list_empty(&sec_gc_ctx_list)) {
+ ctx = cfs_list_entry(sec_gc_ctx_list.next,
+ struct ptlrpc_cli_ctx, cc_gc_chain);
+ cfs_list_del_init(&ctx->cc_gc_chain);
+ spin_unlock(&sec_gc_ctx_list_lock);
- LASSERT(ctx->cc_sec);
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 1);
- CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
- ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
- sptlrpc_cli_ctx_put(ctx, 1);
+ LASSERT(ctx->cc_sec);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 1);
+ CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
+ ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+ sptlrpc_cli_ctx_put(ctx, 1);
- cfs_spin_lock(&sec_gc_ctx_list_lock);
- }
+ spin_lock(&sec_gc_ctx_list_lock);
+ }
- cfs_spin_unlock(&sec_gc_ctx_list_lock);
+ spin_unlock(&sec_gc_ctx_list_lock);
}
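
sec_process_ctx_list() above drains a spinlock-protected list by detaching one entry at a time and dropping the lock around the per-entry work, since that work may sleep. A generic sketch of the same loop, with a trivial stand-in for the real context cleanup:

#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_ctx {
        struct list_head gc_chain;
};

/* stand-in for the real per-context cleanup, which may sleep */
static void demo_process_one(struct demo_ctx *ctx)
{
}

static void demo_drain_list(spinlock_t *lock, struct list_head *head)
{
        struct demo_ctx *ctx;

        spin_lock(lock);
        while (!list_empty(head)) {
                ctx = list_first_entry(head, struct demo_ctx, gc_chain);
                list_del_init(&ctx->gc_chain);
                spin_unlock(lock);      /* never sleep while holding a spinlock */

                demo_process_one(ctx);

                spin_lock(lock);
        }
        spin_unlock(lock);
}
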
static void sec_do_gc(struct ptlrpc_sec *sec)
* to trace each sec as order of expiry time.
* another issue here is we wakeup as fixed interval instead of
* according to each sec's expiry time */
- cfs_mutex_lock(&sec_gc_mutex);
+ mutex_lock(&sec_gc_mutex);
cfs_list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
/* if someone is waiting to be deleted, let it
* proceed as soon as possible. */
if (cfs_atomic_read(&sec_gc_wait_del)) {
CDEBUG(D_SEC, "deletion pending, start over\n");
- cfs_mutex_unlock(&sec_gc_mutex);
+ mutex_unlock(&sec_gc_mutex);
goto again;
}
sec_do_gc(sec);
}
- cfs_mutex_unlock(&sec_gc_mutex);
+ mutex_unlock(&sec_gc_mutex);
/* check ctx list again before sleep */
sec_process_ctx_list();
int sptlrpc_gc_init(void)
{
- struct l_wait_info lwi = { 0 };
- int rc;
+ struct l_wait_info lwi = { 0 };
+ int rc;
- cfs_mutex_init(&sec_gc_mutex);
- cfs_spin_lock_init(&sec_gc_list_lock);
- cfs_spin_lock_init(&sec_gc_ctx_list_lock);
+ mutex_init(&sec_gc_mutex);
+ spin_lock_init(&sec_gc_list_lock);
+ spin_lock_init(&sec_gc_ctx_list_lock);
/* initialize thread control */
memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
null_sec.ps_flvr.sf_flags = 0;
null_sec.ps_part = LUSTRE_SP_ANY;
null_sec.ps_dying = 0;
- cfs_spin_lock_init(&null_sec.ps_lock);
+ spin_lock_init(&null_sec.ps_lock);
cfs_atomic_set(&null_sec.ps_nctx, 1); /* for "null_cli_ctx" */
CFS_INIT_LIST_HEAD(&null_sec.ps_gc_list);
null_sec.ps_gc_interval = 0;
null_cli_ctx.cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_ETERNAL |
PTLRPC_CTX_UPTODATE;
null_cli_ctx.cc_vcred.vc_uid = 0;
- cfs_spin_lock_init(&null_cli_ctx.cc_lock);
- CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
- CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
+ spin_lock_init(&null_cli_ctx.cc_lock);
+ CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
+ CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
}
int sptlrpc_null_init(void)
struct plain_sec {
struct ptlrpc_sec pls_base;
- cfs_rwlock_t pls_lock;
+ rwlock_t pls_lock;
struct ptlrpc_cli_ctx *pls_ctx;
};
static
struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
{
- struct ptlrpc_cli_ctx *ctx, *ctx_new;
+ struct ptlrpc_cli_ctx *ctx, *ctx_new;
- OBD_ALLOC_PTR(ctx_new);
+ OBD_ALLOC_PTR(ctx_new);
- cfs_write_lock(&plsec->pls_lock);
+ write_lock(&plsec->pls_lock);
ctx = plsec->pls_ctx;
if (ctx) {
ctx->cc_expire = 0;
ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
ctx->cc_vcred.vc_uid = 0;
- cfs_spin_lock_init(&ctx->cc_lock);
+ spin_lock_init(&ctx->cc_lock);
CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
cfs_atomic_inc(&ctx->cc_refcount); /* for caller */
}
- cfs_write_unlock(&plsec->pls_lock);
+ write_unlock(&plsec->pls_lock);
- return ctx;
+ return ctx;
}
static
/*
* initialize plain_sec
*/
- cfs_rwlock_init(&plsec->pls_lock);
- plsec->pls_ctx = NULL;
-
- sec = &plsec->pls_base;
- sec->ps_policy = &plain_policy;
- cfs_atomic_set(&sec->ps_refcount, 0);
- cfs_atomic_set(&sec->ps_nctx, 0);
- sec->ps_id = sptlrpc_get_next_secid();
- sec->ps_import = class_import_get(imp);
- sec->ps_flvr = *sf;
- cfs_spin_lock_init(&sec->ps_lock);
+ rwlock_init(&plsec->pls_lock);
+ plsec->pls_ctx = NULL;
+
+ sec = &plsec->pls_base;
+ sec->ps_policy = &plain_policy;
+ cfs_atomic_set(&sec->ps_refcount, 0);
+ cfs_atomic_set(&sec->ps_nctx, 0);
+ sec->ps_id = sptlrpc_get_next_secid();
+ sec->ps_import = class_import_get(imp);
+ sec->ps_flvr = *sf;
+ spin_lock_init(&sec->ps_lock);
CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
sec->ps_gc_interval = 0;
sec->ps_gc_next = 0;
struct vfs_cred *vcred,
int create, int remove_dead)
{
- struct plain_sec *plsec = sec2plsec(sec);
- struct ptlrpc_cli_ctx *ctx;
- ENTRY;
+ struct plain_sec *plsec = sec2plsec(sec);
+ struct ptlrpc_cli_ctx *ctx;
+ ENTRY;
- cfs_read_lock(&plsec->pls_lock);
- ctx = plsec->pls_ctx;
- if (ctx)
- cfs_atomic_inc(&ctx->cc_refcount);
- cfs_read_unlock(&plsec->pls_lock);
+ read_lock(&plsec->pls_lock);
+ ctx = plsec->pls_ctx;
+ if (ctx)
+ cfs_atomic_inc(&ctx->cc_refcount);
+ read_unlock(&plsec->pls_lock);
- if (unlikely(ctx == NULL))
- ctx = plain_sec_install_ctx(plsec);
+ if (unlikely(ctx == NULL))
+ ctx = plain_sec_install_ctx(plsec);
- RETURN(ctx);
+ RETURN(ctx);
}
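
The plain-policy lookup above is the classic rwlock fast path: readers pin the cached context under read_lock(), and only a miss falls back to a write-locked install that re-checks for a racing winner. A self-contained sketch of that shape, not the real plain_sec types:

#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/slab.h>

struct demo_ctx {
        atomic_t refcount;
};

struct demo_sec {
        rwlock_t         lock;
        struct demo_ctx *ctx;
};

static void demo_sec_init(struct demo_sec *sec)
{
        rwlock_init(&sec->lock);
        sec->ctx = NULL;
}

static struct demo_ctx *demo_lookup_ctx(struct demo_sec *sec)
{
        struct demo_ctx *ctx, *ctx_new;

        /* fast path: shared lock, just pin the cached context */
        read_lock(&sec->lock);
        ctx = sec->ctx;
        if (ctx)
                atomic_inc(&ctx->refcount);
        read_unlock(&sec->lock);
        if (ctx)
                return ctx;

        /* slow path: allocate outside the lock, re-check under write lock */
        ctx_new = kzalloc(sizeof(*ctx_new), GFP_KERNEL);
        if (!ctx_new)
                return NULL;
        atomic_set(&ctx_new->refcount, 1);      /* ref held by the cache */

        write_lock(&sec->lock);
        ctx = sec->ctx;
        if (ctx) {                              /* someone else won the race */
                atomic_inc(&ctx->refcount);
                write_unlock(&sec->lock);
                kfree(ctx_new);
                return ctx;
        }
        sec->ctx = ctx_new;
        atomic_inc(&ctx_new->refcount);         /* extra ref for the caller */
        write_unlock(&sec->lock);
        return ctx_new;
}
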
static
if (uid != -1)
RETURN(0);
- cfs_write_lock(&plsec->pls_lock);
+ write_lock(&plsec->pls_lock);
ctx = plsec->pls_ctx;
plsec->pls_ctx = NULL;
- cfs_write_unlock(&plsec->pls_lock);
+ write_unlock(&plsec->pls_lock);
if (ctx)
sptlrpc_cli_ctx_put(ctx, 1);
static void ptlrpc_at_remove_timed(struct ptlrpc_request *req);
static CFS_LIST_HEAD(ptlrpc_all_services);
-cfs_spinlock_t ptlrpc_all_services_lock;
+spinlock_t ptlrpc_all_services_lock;
struct ptlrpc_request_buffer_desc *
ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
return NULL;
}
- cfs_spin_lock(&svcpt->scp_lock);
+ spin_lock(&svcpt->scp_lock);
cfs_list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
svcpt->scp_nrqbds_total++;
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
return rqbd;
}
LASSERT(rqbd->rqbd_refcount == 0);
LASSERT(cfs_list_empty(&rqbd->rqbd_reqs));
- cfs_spin_lock(&svcpt->scp_lock);
+ spin_lock(&svcpt->scp_lock);
cfs_list_del(&rqbd->rqbd_list);
svcpt->scp_nrqbds_total--;
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
OBD_FREE_LARGE(rqbd->rqbd_buffer, svcpt->scp_service->srv_buf_size);
OBD_FREE_PTR(rqbd);
struct ptlrpc_hr_thread {
int hrt_id; /* thread ID */
- cfs_spinlock_t hrt_lock;
+ spinlock_t hrt_lock;
cfs_waitq_t hrt_waitq;
cfs_list_t hrt_queue; /* RS queue */
struct ptlrpc_hr_partition *hrt_partition;
hrt = ptlrpc_hr_select(b->rsb_svcpt);
- cfs_spin_lock(&hrt->hrt_lock);
+ spin_lock(&hrt->hrt_lock);
cfs_list_splice_init(&b->rsb_replies, &hrt->hrt_queue);
- cfs_spin_unlock(&hrt->hrt_lock);
+ spin_unlock(&hrt->hrt_lock);
cfs_waitq_signal(&hrt->hrt_waitq);
b->rsb_n_replies = 0;
if (svcpt != b->rsb_svcpt || b->rsb_n_replies >= MAX_SCHEDULED) {
if (b->rsb_svcpt != NULL) {
rs_batch_dispatch(b);
- cfs_spin_unlock(&b->rsb_svcpt->scp_rep_lock);
+ spin_unlock(&b->rsb_svcpt->scp_rep_lock);
}
- cfs_spin_lock(&svcpt->scp_rep_lock);
+ spin_lock(&svcpt->scp_rep_lock);
b->rsb_svcpt = svcpt;
- }
- cfs_spin_lock(&rs->rs_lock);
- rs->rs_scheduled_ever = 1;
- if (rs->rs_scheduled == 0) {
- cfs_list_move(&rs->rs_list, &b->rsb_replies);
- rs->rs_scheduled = 1;
- b->rsb_n_replies++;
- }
- rs->rs_committed = 1;
- cfs_spin_unlock(&rs->rs_lock);
+ }
+ spin_lock(&rs->rs_lock);
+ rs->rs_scheduled_ever = 1;
+ if (rs->rs_scheduled == 0) {
+ cfs_list_move(&rs->rs_list, &b->rsb_replies);
+ rs->rs_scheduled = 1;
+ b->rsb_n_replies++;
+ }
+ rs->rs_committed = 1;
+ spin_unlock(&rs->rs_lock);
}
/**
{
if (b->rsb_svcpt != NULL) {
rs_batch_dispatch(b);
- cfs_spin_unlock(&b->rsb_svcpt->scp_rep_lock);
+ spin_unlock(&b->rsb_svcpt->scp_rep_lock);
}
}
hrt = ptlrpc_hr_select(rs->rs_svcpt);
- cfs_spin_lock(&hrt->hrt_lock);
+ spin_lock(&hrt->hrt_lock);
cfs_list_add_tail(&rs->rs_list, &hrt->hrt_queue);
- cfs_spin_unlock(&hrt->hrt_lock);
+ spin_unlock(&hrt->hrt_lock);
cfs_waitq_signal(&hrt->hrt_waitq);
EXIT;
* to attend to complete them. */
/* CAVEAT EMPTOR: spinlock ordering!!! */
- cfs_spin_lock(&exp->exp_uncommitted_replies_lock);
+ spin_lock(&exp->exp_uncommitted_replies_lock);
cfs_list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
rs_obd_list) {
LASSERT (rs->rs_difficult);
rs_batch_add(&batch, rs);
}
}
- cfs_spin_unlock(&exp->exp_uncommitted_replies_lock);
- rs_batch_fini(&batch);
- EXIT;
+ spin_unlock(&exp->exp_uncommitted_replies_lock);
+ rs_batch_fini(&batch);
+ EXIT;
}
EXPORT_SYMBOL(ptlrpc_commit_replies);
int posted = 0;
for (;;) {
- cfs_spin_lock(&svcpt->scp_lock);
+ spin_lock(&svcpt->scp_lock);
if (cfs_list_empty(&svcpt->scp_rqbd_idle)) {
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
return posted;
}
svcpt->scp_nrqbds_posted++;
cfs_list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_posted);
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
rc = ptlrpc_register_rqbd(rqbd);
if (rc != 0)
posted = 1;
}
- cfs_spin_lock(&svcpt->scp_lock);
+ spin_lock(&svcpt->scp_lock);
svcpt->scp_nrqbds_posted--;
cfs_list_del(&rqbd->rqbd_list);
/* Don't complain if no request buffers are posted right now; LNET
* won't drop requests because we set the portal lazy! */
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
return -1;
}
CFS_INIT_LIST_HEAD(&svcpt->scp_threads);
/* rqbd and incoming request queue */
- cfs_spin_lock_init(&svcpt->scp_lock);
+ spin_lock_init(&svcpt->scp_lock);
CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
CFS_INIT_LIST_HEAD(&svcpt->scp_req_incoming);
CFS_INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);
/* active requests and hp requests */
- cfs_spin_lock_init(&svcpt->scp_req_lock);
+ spin_lock_init(&svcpt->scp_req_lock);
CFS_INIT_LIST_HEAD(&svcpt->scp_req_pending);
CFS_INIT_LIST_HEAD(&svcpt->scp_hreq_pending);
/* reply states */
- cfs_spin_lock_init(&svcpt->scp_rep_lock);
+ spin_lock_init(&svcpt->scp_rep_lock);
CFS_INIT_LIST_HEAD(&svcpt->scp_rep_active);
#ifndef __KERNEL__
CFS_INIT_LIST_HEAD(&svcpt->scp_rep_queue);
cfs_atomic_set(&svcpt->scp_nreps_difficult, 0);
/* adaptive timeout */
- cfs_spin_lock_init(&svcpt->scp_at_lock);
+ spin_lock_init(&svcpt->scp_at_lock);
array = &svcpt->scp_at_array;
size = at_est2timeout(at_max);
service->srv_cpt_bits++;
/* public members */
- cfs_spin_lock_init(&service->srv_lock);
+ spin_lock_init(&service->srv_lock);
service->srv_name = conf->psc_name;
service->srv_watchdog_factor = conf->psc_watchdog_factor;
CFS_INIT_LIST_HEAD(&service->srv_list); /* for safety of cleanup */
rc = LNetSetLazyPortal(service->srv_req_portal);
LASSERT(rc == 0);
- cfs_spin_lock (&ptlrpc_all_services_lock);
+ spin_lock(&ptlrpc_all_services_lock);
cfs_list_add (&service->srv_list, &ptlrpc_all_services);
- cfs_spin_unlock (&ptlrpc_all_services_lock);
+ spin_unlock(&ptlrpc_all_services_lock);
if (proc_entry != NULL)
ptlrpc_lprocfs_register_service(proc_entry, service);
return;
if (req->rq_at_linked) {
- cfs_spin_lock(&svcpt->scp_at_lock);
+ spin_lock(&svcpt->scp_at_lock);
/* recheck with lock, in case it's unlinked by
* ptlrpc_at_check_timed() */
if (likely(req->rq_at_linked))
ptlrpc_at_remove_timed(req);
- cfs_spin_unlock(&svcpt->scp_at_lock);
+ spin_unlock(&svcpt->scp_at_lock);
}
LASSERT(cfs_list_empty(&req->rq_timed_list));
req->rq_export = NULL;
}
- cfs_spin_lock(&svcpt->scp_lock);
+ spin_lock(&svcpt->scp_lock);
cfs_list_add(&req->rq_list, &rqbd->rqbd_reqs);
cfs_list_del(&req->rq_history_list);
}
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
cfs_list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
req = cfs_list_entry(rqbd->rqbd_reqs.next,
ptlrpc_server_free_request(req);
}
- cfs_spin_lock(&svcpt->scp_lock);
+ spin_lock(&svcpt->scp_lock);
/*
* now all reqs including the embedded req has been
* disposed, schedule request buffer for re-use.
&svcpt->scp_rqbd_idle);
}
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
} else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) {
/* If we are low on memory, we are not interested in history */
cfs_list_del(&req->rq_list);
if (req->rq_history_seq > svcpt->scp_hist_seq_culled)
svcpt->scp_hist_seq_culled = req->rq_history_seq;
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
ptlrpc_server_free_request(req);
} else {
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
}
}
{
ptlrpc_server_hpreq_fini(req);
- cfs_spin_lock(&svcpt->scp_req_lock);
+ spin_lock(&svcpt->scp_req_lock);
svcpt->scp_nreqs_active--;
if (req->rq_hp)
svcpt->scp_nhreqs_active--;
- cfs_spin_unlock(&svcpt->scp_req_lock);
+ spin_unlock(&svcpt->scp_req_lock);
ptlrpc_server_drop_request(req);
}
/* exports may get disconnected from the chain even though the
export has references, so we must keep the spin lock while
manipulating the lists */
- cfs_spin_lock(&exp->exp_obd->obd_dev_lock);
+ spin_lock(&exp->exp_obd->obd_dev_lock);
- if (cfs_list_empty(&exp->exp_obd_chain_timed)) {
- /* this one is not timed */
- cfs_spin_unlock(&exp->exp_obd->obd_dev_lock);
+ if (cfs_list_empty(&exp->exp_obd_chain_timed)) {
+ /* this one is not timed */
+ spin_unlock(&exp->exp_obd->obd_dev_lock);
RETURN_EXIT;
}
oldest_exp = cfs_list_entry(exp->exp_obd->obd_exports_timed.next,
struct obd_export, exp_obd_chain_timed);
oldest_time = oldest_exp->exp_last_request_time;
- cfs_spin_unlock(&exp->exp_obd->obd_dev_lock);
+ spin_unlock(&exp->exp_obd->obd_dev_lock);
if (exp->exp_obd->obd_recovering) {
/* be nice to everyone during recovery */
if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0)
return(-ENOSYS);
- cfs_spin_lock(&svcpt->scp_at_lock);
+ spin_lock(&svcpt->scp_at_lock);
LASSERT(cfs_list_empty(&req->rq_timed_list));
index = (unsigned long)req->rq_deadline % array->paa_size;
cfs_list_add(&req->rq_timed_list,
&array->paa_reqs_array[index]);
- cfs_spin_lock(&req->rq_lock);
- req->rq_at_linked = 1;
- cfs_spin_unlock(&req->rq_lock);
- req->rq_at_index = index;
- array->paa_reqs_count[index]++;
- array->paa_count++;
- if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) {
- array->paa_deadline = req->rq_deadline;
+ spin_lock(&req->rq_lock);
+ req->rq_at_linked = 1;
+ spin_unlock(&req->rq_lock);
+ req->rq_at_index = index;
+ array->paa_reqs_count[index]++;
+ array->paa_count++;
+ if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) {
+ array->paa_deadline = req->rq_deadline;
ptlrpc_at_set_timer(svcpt);
}
- cfs_spin_unlock(&svcpt->scp_at_lock);
+ spin_unlock(&svcpt->scp_at_lock);
return 0;
}
LASSERT(!cfs_list_empty(&req->rq_timed_list));
cfs_list_del_init(&req->rq_timed_list);
- cfs_spin_lock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
req->rq_at_linked = 0;
- cfs_spin_unlock(&req->rq_lock);
+ spin_unlock(&req->rq_lock);
array->paa_reqs_count[req->rq_at_index]--;
array->paa_count--;
int first, counter = 0;
ENTRY;
- cfs_spin_lock(&svcpt->scp_at_lock);
+ spin_lock(&svcpt->scp_at_lock);
if (svcpt->scp_at_check == 0) {
- cfs_spin_unlock(&svcpt->scp_at_lock);
+ spin_unlock(&svcpt->scp_at_lock);
RETURN(0);
}
delay = cfs_time_sub(cfs_time_current(), svcpt->scp_at_checktime);
svcpt->scp_at_check = 0;
if (array->paa_count == 0) {
- cfs_spin_unlock(&svcpt->scp_at_lock);
+ spin_unlock(&svcpt->scp_at_lock);
RETURN(0);
}
if (first > at_early_margin) {
/* We've still got plenty of time. Reset the timer. */
ptlrpc_at_set_timer(svcpt);
- cfs_spin_unlock(&svcpt->scp_at_lock);
+ spin_unlock(&svcpt->scp_at_lock);
RETURN(0);
}
/* we have a new earliest deadline, restart the timer */
ptlrpc_at_set_timer(svcpt);
- cfs_spin_unlock(&svcpt->scp_at_lock);
+ spin_unlock(&svcpt->scp_at_lock);
CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early "
"replies\n", first, at_extra, counter);
if (req->rq_ops->hpreq_check)
rc = req->rq_ops->hpreq_check(req);
- cfs_spin_lock_bh(&req->rq_export->exp_rpc_lock);
- cfs_list_add(&req->rq_exp_list,
- &req->rq_export->exp_hp_rpcs);
- cfs_spin_unlock_bh(&req->rq_export->exp_rpc_lock);
- }
+ spin_lock_bh(&req->rq_export->exp_rpc_lock);
+ cfs_list_add(&req->rq_exp_list,
+ &req->rq_export->exp_hp_rpcs);
+ spin_unlock_bh(&req->rq_export->exp_rpc_lock);
+ }
- RETURN(rc);
+ RETURN(rc);
}
/** Remove the request from the export list. */
if (req->rq_ops->hpreq_fini)
req->rq_ops->hpreq_fini(req);
- cfs_spin_lock_bh(&req->rq_export->exp_rpc_lock);
- cfs_list_del_init(&req->rq_exp_list);
- cfs_spin_unlock_bh(&req->rq_export->exp_rpc_lock);
- }
- EXIT;
+ spin_lock_bh(&req->rq_export->exp_rpc_lock);
+ cfs_list_del_init(&req->rq_exp_list);
+ spin_unlock_bh(&req->rq_export->exp_rpc_lock);
+ }
+ EXIT;
}
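
The hunks above keep the _bh spinlock variants through the conversion. A brief sketch of the usual reason for that choice: when the same lock may also be taken from bottom-half (softirq) context, process-context users must disable bottom halves while holding it. Illustrative names only, with locks assumed initialized elsewhere:

#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_export {
        spinlock_t       rpc_lock;      /* also taken from softirq context */
        struct list_head hp_rpcs;
};

struct demo_req {
        struct list_head exp_list;
};

static void demo_hpreq_add(struct demo_export *exp, struct demo_req *req)
{
        spin_lock_bh(&exp->rpc_lock);   /* block softirqs that use this lock */
        list_add(&req->exp_list, &exp->hp_rpcs);
        spin_unlock_bh(&exp->rpc_lock);
}

static void demo_hpreq_del(struct demo_export *exp, struct demo_req *req)
{
        spin_lock_bh(&exp->rpc_lock);
        list_del_init(&req->exp_list);
        spin_unlock_bh(&exp->rpc_lock);
}
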
static int ptlrpc_hpreq_check(struct ptlrpc_request *req)
static void ptlrpc_hpreq_reorder_nolock(struct ptlrpc_service_part *svcpt,
struct ptlrpc_request *req)
{
- ENTRY;
+ ENTRY;
- cfs_spin_lock(&req->rq_lock);
+ spin_lock(&req->rq_lock);
if (req->rq_hp == 0) {
int opc = lustre_msg_get_opc(req->rq_reqmsg);
if (opc != OBD_PING)
DEBUG_REQ(D_RPCTRACE, req, "high priority req");
}
- cfs_spin_unlock(&req->rq_lock);
- EXIT;
+ spin_unlock(&req->rq_lock);
+ EXIT;
}
/**
struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
ENTRY;
- cfs_spin_lock(&svcpt->scp_req_lock);
+ spin_lock(&svcpt->scp_req_lock);
/* It may happen that the request is already taken for the processing
* but still in the export list, or the request is not in the request
* queue but in the export list already, do not add it into the
* HP list. */
if (!cfs_list_empty(&req->rq_list))
ptlrpc_hpreq_reorder_nolock(svcpt, req);
- cfs_spin_unlock(&svcpt->scp_req_lock);
+ spin_unlock(&svcpt->scp_req_lock);
EXIT;
}
EXPORT_SYMBOL(ptlrpc_hpreq_reorder);
if (rc < 0)
RETURN(rc);
- cfs_spin_lock(&svcpt->scp_req_lock);
+ spin_lock(&svcpt->scp_req_lock);
if (rc)
ptlrpc_hpreq_reorder_nolock(svcpt, req);
else
cfs_list_add_tail(&req->rq_list, &svcpt->scp_req_pending);
- cfs_spin_unlock(&svcpt->scp_req_lock);
+ spin_unlock(&svcpt->scp_req_lock);
RETURN(0);
}
int rc;
ENTRY;
- cfs_spin_lock(&svcpt->scp_lock);
+ spin_lock(&svcpt->scp_lock);
if (cfs_list_empty(&svcpt->scp_req_incoming)) {
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
RETURN(0);
}
svcpt->scp_nreqs_incoming--;
/* Consider this still a "queued" request as far as stats are
* concerned */
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
/* go through security check/transform */
rc = sptlrpc_svc_unwrap_request(req);
err_req:
if (req->rq_export)
class_export_rpc_put(req->rq_export);
- cfs_spin_lock(&svcpt->scp_req_lock);
+ spin_lock(&svcpt->scp_req_lock);
svcpt->scp_nreqs_active++;
- cfs_spin_unlock(&svcpt->scp_req_lock);
+ spin_unlock(&svcpt->scp_req_lock);
ptlrpc_server_finish_request(svcpt, req);
RETURN(1);
int fail_opc = 0;
ENTRY;
- cfs_spin_lock(&svcpt->scp_req_lock);
+ spin_lock(&svcpt->scp_req_lock);
#ifndef __KERNEL__
/* !@%$# liblustre only has 1 thread */
if (cfs_atomic_read(&svcpt->scp_nreps_difficult) != 0) {
- cfs_spin_unlock(&svcpt->scp_req_lock);
+ spin_unlock(&svcpt->scp_req_lock);
RETURN(0);
}
#endif
request = ptlrpc_server_request_get(svcpt, 0);
if (request == NULL) {
- cfs_spin_unlock(&svcpt->scp_req_lock);
+ spin_unlock(&svcpt->scp_req_lock);
RETURN(0);
}
if (unlikely(fail_opc)) {
if (request->rq_export && request->rq_ops) {
- cfs_spin_unlock(&svcpt->scp_req_lock);
+ spin_unlock(&svcpt->scp_req_lock);
OBD_FAIL_TIMEOUT(fail_opc, 4);
- cfs_spin_lock(&svcpt->scp_req_lock);
+ spin_lock(&svcpt->scp_req_lock);
request = ptlrpc_server_request_get(svcpt, 0);
if (request == NULL) {
- cfs_spin_unlock(&svcpt->scp_req_lock);
+ spin_unlock(&svcpt->scp_req_lock);
RETURN(0);
}
}
if (request->rq_hp)
svcpt->scp_nhreqs_active++;
- cfs_spin_unlock(&svcpt->scp_req_lock);
+ spin_unlock(&svcpt->scp_req_lock);
ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);
LASSERT (rs->rs_scheduled);
LASSERT (cfs_list_empty(&rs->rs_list));
- cfs_spin_lock (&exp->exp_lock);
- /* Noop if removed already */
- cfs_list_del_init (&rs->rs_exp_list);
- cfs_spin_unlock (&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
+ /* Noop if removed already */
+ cfs_list_del_init (&rs->rs_exp_list);
+ spin_unlock(&exp->exp_lock);
/* The disk commit callback holds exp_uncommitted_replies_lock while it
* iterates over newly committed replies, removing them from
* holding rs_lock, we can be sure it has all completed once we hold
* rs_lock, which we do right next.
*/
- if (!rs->rs_committed) {
- cfs_spin_lock(&exp->exp_uncommitted_replies_lock);
- cfs_list_del_init(&rs->rs_obd_list);
- cfs_spin_unlock(&exp->exp_uncommitted_replies_lock);
- }
+ if (!rs->rs_committed) {
+ spin_lock(&exp->exp_uncommitted_replies_lock);
+ cfs_list_del_init(&rs->rs_obd_list);
+ spin_unlock(&exp->exp_uncommitted_replies_lock);
+ }
- cfs_spin_lock(&rs->rs_lock);
+ spin_lock(&rs->rs_lock);
been_handled = rs->rs_handled;
rs->rs_handled = 1;
}
if ((!been_handled && rs->rs_on_net) || nlocks > 0) {
- cfs_spin_unlock(&rs->rs_lock);
+ spin_unlock(&rs->rs_lock);
- if (!been_handled && rs->rs_on_net) {
- LNetMDUnlink(rs->rs_md_h);
- /* Ignore return code; we're racing with
- * completion... */
- }
+ if (!been_handled && rs->rs_on_net) {
+ LNetMDUnlink(rs->rs_md_h);
+ /* Ignore return code; we're racing with completion */
+ }
- while (nlocks-- > 0)
- ldlm_lock_decref(&rs->rs_locks[nlocks],
- rs->rs_modes[nlocks]);
+ while (nlocks-- > 0)
+ ldlm_lock_decref(&rs->rs_locks[nlocks],
+ rs->rs_modes[nlocks]);
- cfs_spin_lock(&rs->rs_lock);
- }
+ spin_lock(&rs->rs_lock);
+ }
- rs->rs_scheduled = 0;
+ rs->rs_scheduled = 0;
- if (!rs->rs_on_net) {
- /* Off the net */
- cfs_spin_unlock(&rs->rs_lock);
+ if (!rs->rs_on_net) {
+ /* Off the net */
+ spin_unlock(&rs->rs_lock);
class_export_put (exp);
rs->rs_export = NULL;
}
/* still on the net; callback will schedule */
- cfs_spin_unlock(&rs->rs_lock);
+ spin_unlock(&rs->rs_lock);
RETURN(1);
}
struct ptlrpc_reply_state *rs = NULL;
ENTRY;
- cfs_spin_lock(&svcpt->scp_rep_lock);
+ spin_lock(&svcpt->scp_rep_lock);
if (!cfs_list_empty(&svcpt->scp_rep_queue)) {
rs = cfs_list_entry(svcpt->scp_rep_queue.prev,
struct ptlrpc_reply_state,
rs_list);
cfs_list_del_init(&rs->rs_list);
}
- cfs_spin_unlock(&svcpt->scp_rep_lock);
+ spin_unlock(&svcpt->scp_rep_lock);
if (rs != NULL)
ptlrpc_handle_rs(rs);
RETURN(rs != NULL);
goto out_srv_fini;
}
- cfs_spin_lock(&svcpt->scp_lock);
+ spin_lock(&svcpt->scp_lock);
LASSERT(thread_is_starting(thread));
thread_clear_flags(thread, SVC_STARTING);
* we are now running, however we will exit as soon as possible */
thread_add_flags(thread, SVC_RUNNING);
svcpt->scp_nthrs_running++;
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
/* wake up our creator in case he's still waiting. */
cfs_waitq_signal(&thread->t_ctl_waitq);
thread->t_watchdog = lc_watchdog_add(ptlrpc_server_get_timeout(svcpt),
NULL, NULL);
- cfs_spin_lock(&svcpt->scp_rep_lock);
+ spin_lock(&svcpt->scp_rep_lock);
cfs_list_add(&rs->rs_list, &svcpt->scp_rep_idle);
cfs_waitq_signal(&svcpt->scp_rep_waitq);
- cfs_spin_unlock(&svcpt->scp_rep_lock);
+ spin_unlock(&svcpt->scp_rep_lock);
CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id,
svcpt->scp_nthrs_running);
CDEBUG(D_RPCTRACE, "service thread [ %p : %u ] %d exiting: rc %d\n",
thread, thread->t_pid, thread->t_id, rc);
- cfs_spin_lock(&svcpt->scp_lock);
+ spin_lock(&svcpt->scp_lock);
if (thread_test_and_clear_flags(thread, SVC_STARTING))
svcpt->scp_nthrs_starting--;
thread_add_flags(thread, SVC_STOPPED);
cfs_waitq_signal(&thread->t_ctl_waitq);
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
return rc;
}
{
int result;
- cfs_spin_lock(&hrt->hrt_lock);
+ spin_lock(&hrt->hrt_lock);
cfs_list_splice_init(&hrt->hrt_queue, replies);
result = ptlrpc_hr.hr_stopping || !cfs_list_empty(replies);
- cfs_spin_unlock(&hrt->hrt_lock);
+ spin_unlock(&hrt->hrt_lock);
return result;
}
CDEBUG(D_INFO, "Stopping threads for service %s\n",
svcpt->scp_service->srv_name);
- cfs_spin_lock(&svcpt->scp_lock);
+ spin_lock(&svcpt->scp_lock);
/* let the thread know that we would like it to stop asap */
list_for_each_entry(thread, &svcpt->scp_threads, t_link) {
CDEBUG(D_INFO, "Stopping thread %s #%u\n",
cfs_list_add(&thread->t_link, &zombie);
continue;
}
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
CDEBUG(D_INFO, "waiting for stopping-thread %s #%u\n",
svcpt->scp_service->srv_thread_name, thread->t_id);
l_wait_event(thread->t_ctl_waitq,
thread_is_stopped(thread), &lwi);
- cfs_spin_lock(&svcpt->scp_lock);
+ spin_lock(&svcpt->scp_lock);
}
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
while (!cfs_list_empty(&zombie)) {
thread = cfs_list_entry(zombie.next,
RETURN(-ENOMEM);
cfs_waitq_init(&thread->t_ctl_waitq);
- cfs_spin_lock(&svcpt->scp_lock);
+ spin_lock(&svcpt->scp_lock);
if (!ptlrpc_threads_increasable(svcpt)) {
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
OBD_FREE_PTR(thread);
RETURN(-EMFILE);
}
/* serialize starting because some modules (obdfilter)
* might require unique and contiguous t_id */
LASSERT(svcpt->scp_nthrs_starting == 1);
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
OBD_FREE_PTR(thread);
if (wait) {
CDEBUG(D_INFO, "Waiting for creating thread %s #%d\n",
thread->t_svcpt = svcpt;
cfs_list_add(&thread->t_link, &svcpt->scp_threads);
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
if (svcpt->scp_cpt >= 0) {
snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s%02d_%03d",
if (rc < 0) {
CERROR("cannot start thread '%s': rc %d\n",
thread->t_name, rc);
- cfs_spin_lock(&svcpt->scp_lock);
+ spin_lock(&svcpt->scp_lock);
cfs_list_del(&thread->t_link);
--svcpt->scp_nthrs_starting;
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
OBD_FREE(thread, sizeof(*thread));
RETURN(rc);
hrt->hrt_id = j;
hrt->hrt_partition = hrp;
cfs_waitq_init(&hrt->hrt_waitq);
- cfs_spin_lock_init(&hrt->hrt_lock);
+ spin_lock_init(&hrt->hrt_lock);
CFS_INIT_LIST_HEAD(&hrt->hrt_queue);
}
}
/* Wait for the network to release any buffers
* it's currently filling */
- cfs_spin_lock(&svcpt->scp_lock);
+ spin_lock(&svcpt->scp_lock);
while (svcpt->scp_nrqbds_posted != 0) {
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
/* Network access will complete in finite time but
* the HUGE timeout lets us CWARN for visibility
* of sluggish NALs */
"request buffers\n",
svcpt->scp_service->srv_name);
}
- cfs_spin_lock(&svcpt->scp_lock);
+ spin_lock(&svcpt->scp_lock);
}
- cfs_spin_unlock(&svcpt->scp_lock);
+ spin_unlock(&svcpt->scp_lock);
}
}
if (svcpt->scp_service == NULL)
break;
- cfs_spin_lock(&svcpt->scp_rep_lock);
+ spin_lock(&svcpt->scp_rep_lock);
while (!cfs_list_empty(&svcpt->scp_rep_active)) {
rs = cfs_list_entry(svcpt->scp_rep_active.next,
struct ptlrpc_reply_state, rs_list);
- cfs_spin_lock(&rs->rs_lock);
+ spin_lock(&rs->rs_lock);
ptlrpc_schedule_difficult_reply(rs);
- cfs_spin_unlock(&rs->rs_lock);
+ spin_unlock(&rs->rs_lock);
}
- cfs_spin_unlock(&svcpt->scp_rep_lock);
+ spin_unlock(&svcpt->scp_rep_lock);
/* purge the request queue. NB No new replies (rqbds
* all unlinked) and no service threads, so I'm the only
service->srv_is_stopping = 1;
- cfs_spin_lock(&ptlrpc_all_services_lock);
+ spin_lock(&ptlrpc_all_services_lock);
cfs_list_del_init(&service->srv_list);
- cfs_spin_unlock(&ptlrpc_all_services_lock);
+ spin_unlock(&ptlrpc_all_services_lock);
ptlrpc_lprocfs_unregister_service(service);
cfs_gettimeofday(&right_now);
- cfs_spin_lock(&svcpt->scp_req_lock);
+ spin_lock(&svcpt->scp_req_lock);
if (!ptlrpc_server_request_pending(svcpt, 1)) {
- cfs_spin_unlock(&svcpt->scp_req_lock);
+ spin_unlock(&svcpt->scp_req_lock);
return 0;
}
}
timediff = cfs_timeval_sub(&right_now, &request->rq_arrival_time, NULL);
- cfs_spin_unlock(&svcpt->scp_req_lock);
+ spin_unlock(&svcpt->scp_req_lock);
if ((timediff / ONE_MILLION) >
(AT_OFF ? obd_timeout * 3 / 2 : at_max)) {
/* r/w semaphore used to protect concurrent access to the quota
* parameters which are stored on disk */
- cfs_rw_semaphore_t lme_sem;
+ struct rw_semaphore lme_sem;
/* quota space that may be released after glimpse */
__u64 lme_may_rel;
unsigned int lse_pending_req;
/* rw spinlock protecting in-memory counters (i.e. lse_pending*) */
- cfs_rwlock_t lse_lock;
+ rwlock_t lse_lock;
/* waiter for pending request done */
cfs_waitq_t lse_waiters;
static inline void lqe_write_lock(struct lquota_entry *lqe)
{
if (lqe_is_master(lqe))
- cfs_down_write(&lqe->lqe_sem);
+ down_write(&lqe->lqe_sem);
else
- cfs_write_lock(&lqe->lqe_lock);
+ write_lock(&lqe->lqe_lock);
}
static inline void lqe_write_unlock(struct lquota_entry *lqe)
{
if (lqe_is_master(lqe))
- cfs_up_write(&lqe->lqe_sem);
+ up_write(&lqe->lqe_sem);
else
- cfs_write_unlock(&lqe->lqe_lock);
+ write_unlock(&lqe->lqe_lock);
}
static inline void lqe_read_lock(struct lquota_entry *lqe)
{
if (lqe_is_master(lqe))
- cfs_down_read(&lqe->lqe_sem);
+ down_read(&lqe->lqe_sem);
else
- cfs_read_lock(&lqe->lqe_lock);
+ read_lock(&lqe->lqe_lock);
}
static inline void lqe_read_unlock(struct lquota_entry *lqe)
{
if (lqe_is_master(lqe))
- cfs_up_read(&lqe->lqe_sem);
+ up_read(&lqe->lqe_sem);
else
- cfs_read_unlock(&lqe->lqe_lock);
+ read_unlock(&lqe->lqe_lock);
}
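
The lqe_*_lock() wrappers above pick the primitive per entry: master-side entries use a struct rw_semaphore because the protected quota settings live on disk and holders may sleep, while slave-side entries use a rwlock_t for purely in-memory counters. A sketch of the same wrapper idea with an illustrative structure, not the real lquota_entry layout:

#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_lqe {
        bool                    is_master;
        struct rw_semaphore     sem;    /* master: may sleep under it   */
        rwlock_t                lock;   /* slave: short atomic sections */
};

static void demo_lqe_init(struct demo_lqe *lqe, bool is_master)
{
        lqe->is_master = is_master;
        if (is_master)
                init_rwsem(&lqe->sem);
        else
                rwlock_init(&lqe->lock);
}

static void demo_lqe_write_lock(struct demo_lqe *lqe)
{
        if (lqe->is_master)
                down_write(&lqe->sem);
        else
                write_lock(&lqe->lock);
}

static void demo_lqe_write_unlock(struct demo_lqe *lqe)
{
        if (lqe->is_master)
                up_write(&lqe->sem);
        else
                write_unlock(&lqe->lock);
}
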
/*
thread_set_flags(&qmt->qmt_reba_thread, SVC_STOPPED);
cfs_waitq_init(&qmt->qmt_reba_thread.t_ctl_waitq);
CFS_INIT_LIST_HEAD(&qmt->qmt_reba_list);
- cfs_spin_lock_init(&qmt->qmt_reba_lock);
+ spin_lock_init(&qmt->qmt_reba_lock);
rc = qmt_start_reba_thread(qmt);
if (rc) {
CERROR("%s: failed to start rebalance thread (%d)\n",
LASSERT(lqe_is_master(lqe));
lqe->lqe_revoke_time = 0;
- cfs_init_rwsem(&lqe->lqe_sem);
+ init_rwsem(&lqe->lqe_sem);
}
/*
cfs_list_t qmt_reba_list;
/* lock protecting rebalancing list */
- cfs_spinlock_t qmt_reba_lock;
+ spinlock_t qmt_reba_lock;
unsigned long qmt_stopping:1; /* qmt is stopping */
static inline bool lqe_is_locked(struct lquota_entry *lqe)
{
LASSERT(lqe_is_master(lqe));
- if (cfs_down_write_trylock(&lqe->lqe_sem) == 0)
+ if (down_write_trylock(&lqe->lqe_sem) == 0)
return true;
lqe_write_unlock(lqe);
return false;
ENTRY;
lqe_getref(lqe);
- cfs_spin_lock(&qmt->qmt_reba_lock);
+ spin_lock(&qmt->qmt_reba_lock);
if (!qmt->qmt_stopping && cfs_list_empty(&lqe->lqe_link)) {
cfs_list_add_tail(&lqe->lqe_link, &qmt->qmt_reba_list);
added = true;
}
- cfs_spin_unlock(&qmt->qmt_reba_lock);
+ spin_unlock(&qmt->qmt_reba_lock);
if (added)
cfs_waitq_signal(&qmt->qmt_reba_thread.t_ctl_waitq);
!cfs_list_empty(&qmt->qmt_reba_list) ||
!thread_is_running(thread), &lwi);
- cfs_spin_lock(&qmt->qmt_reba_lock);
+ spin_lock(&qmt->qmt_reba_lock);
cfs_list_for_each_entry_safe(lqe, tmp, &qmt->qmt_reba_list,
lqe_link) {
cfs_list_del_init(&lqe->lqe_link);
- cfs_spin_unlock(&qmt->qmt_reba_lock);
+ spin_unlock(&qmt->qmt_reba_lock);
if (thread_is_running(thread))
qmt_id_lock_glimpse(env, qmt, lqe, NULL);
lqe_putref(lqe);
- cfs_spin_lock(&qmt->qmt_reba_lock);
+ spin_lock(&qmt->qmt_reba_lock);
}
- cfs_spin_unlock(&qmt->qmt_reba_lock);
+ spin_unlock(&qmt->qmt_reba_lock);
if (!thread_is_running(thread))
break;
ENTRY;
LASSERT(qfs != NULL);
- cfs_spin_lock(&qfs_list_lock);
+ spin_lock(&qfs_list_lock);
LASSERT(qfs->qfs_ref > 0);
qfs->qfs_ref--;
if (qfs->qfs_ref == 0) {
cfs_list_del(&qfs->qfs_link);
OBD_FREE_PTR(qfs);
}
- cfs_spin_unlock(&qfs_list_lock);
+ spin_unlock(&qfs_list_lock);
EXIT;
}
if (new == NULL)
RETURN(NULL);
- cfs_sema_init(&new->qfs_sem, 1);
+ sema_init(&new->qfs_sem, 1);
CFS_INIT_LIST_HEAD(&new->qfs_qsd_list);
strcpy(new->qfs_name, name);
new->qfs_ref = 1;
}
/* search in the fsinfo list */
- cfs_spin_lock(&qfs_list_lock);
+ spin_lock(&qfs_list_lock);
cfs_list_for_each_entry(qfs, &qfs_list, qfs_link) {
if (!strcmp(qfs->qfs_name, name)) {
qfs->qfs_ref++;
new = NULL;
}
out:
- cfs_spin_unlock(&qfs_list_lock);
+ spin_unlock(&qfs_list_lock);
if (new)
OBD_FREE_PTR(new);
if (strchr(valstr, 'g'))
enabled |= 1 << GRPQUOTA;
- cfs_down(&qfs->qfs_sem);
+ down(&qfs->qfs_sem);
if (qfs->qfs_enabled[pool - LQUOTA_FIRST_RES] == enabled)
/* no change required */
GOTO(out, rc = 0);
/* start reintegration only if qsd_prepare() was
* successfully called */
- cfs_read_lock(&qsd->qsd_lock);
+ read_lock(&qsd->qsd_lock);
if (!qsd->qsd_prepared)
skip = true;
- cfs_read_unlock(&qsd->qsd_lock);
+ read_unlock(&qsd->qsd_lock);
if (skip)
continue;
}
}
out:
- cfs_up(&qfs->qfs_sem);
+ up(&qfs->qfs_sem);
qsd_put_fsinfo(qfs);
RETURN(0);
}
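
The hunks above convert cfs_sema_init()/cfs_down()/cfs_up() on qfs_sem to the native counting-semaphore API: the semaphore is initialized to 1 and used as a sleeping lock around per-filesystem quota state. A sketch with illustrative names; in new code a struct mutex would usually be preferred, but the patch keeps the semaphore API one-for-one:

#include <linux/semaphore.h>
#include <linux/list.h>

struct demo_fsinfo {
        struct semaphore sem;
        struct list_head qsd_list;
        int              enabled;
};

static void demo_fsinfo_init(struct demo_fsinfo *qfs)
{
        sema_init(&qfs->sem, 1);        /* binary semaphore, i.e. a sleeping lock */
        INIT_LIST_HEAD(&qfs->qsd_list);
        qfs->enabled = 0;
}

static void demo_fsinfo_set_enabled(struct demo_fsinfo *qfs, int enabled)
{
        down(&qfs->sem);                /* may sleep */
        qfs->enabled = enabled;
        up(&qfs->sem);
}
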
LASSERT(!lqe_is_master(lqe));
/* initialize slave parameters */
- cfs_rwlock_init(&lqe->lqe_lock);
+ rwlock_init(&lqe->lqe_lock);
memset(&lqe->lqe_lockh, 0, sizeof(lqe->lqe_lockh));
lqe->lqe_pending_write = 0;
lqe->lqe_pending_req = 0;
struct ldlm_lock *lock;
ENTRY;
- cfs_read_lock(&qsd->qsd_lock);
+ read_lock(&qsd->qsd_lock);
/* is the qsd about to shut down? */
if (qsd->qsd_stopping) {
- cfs_read_unlock(&qsd->qsd_lock);
+ read_unlock(&qsd->qsd_lock);
LQUOTA_DEBUG(lqe, "dropping quota req since qsd is stopping");
/* Target is about to shut down, client will retry */
RETURN(-EINPROGRESS);
if (qsd->qsd_exp_valid)
imp = class_exp2cliimp(qsd->qsd_exp);
if (imp == NULL || imp->imp_invalid) {
- cfs_read_unlock(&qsd->qsd_lock);
+ read_unlock(&qsd->qsd_lock);
LQUOTA_DEBUG(lqe, "connection to master not ready");
RETURN(-ENOTCONN);
}
* If the previous reintegration failed for some reason, we'll
* re-trigger it here as well. */
if (!qqi->qqi_glb_uptodate || !qqi->qqi_slv_uptodate) {
- cfs_read_unlock(&qsd->qsd_lock);
+ read_unlock(&qsd->qsd_lock);
LQUOTA_DEBUG(lqe, "not up-to-date, dropping request and "
"kicking off reintegration");
qsd_start_reint_thread(qqi);
/* Fill the remote global lock handle, master will check this handle
* to see if the slave is sending request with stale lock */
lustre_handle_copy(lockh, &qqi->qqi_lockh);
- cfs_read_unlock(&qsd->qsd_lock);
+ read_unlock(&qsd->qsd_lock);
if (!lustre_handle_is_used(lockh))
RETURN(-ENOLCK);
RETURN(0);
/* We don't enforce quota until the qsd_instance is started */
- cfs_read_lock(&qsd->qsd_lock);
+ read_lock(&qsd->qsd_lock);
if (!qsd->qsd_started) {
- cfs_read_unlock(&qsd->qsd_lock);
+ read_unlock(&qsd->qsd_lock);
RETURN(0);
}
- cfs_read_unlock(&qsd->qsd_lock);
+ read_unlock(&qsd->qsd_lock);
/* ignore block quota on MDTs, ignore inode quota on OSTs */
if ((!qsd->qsd_is_md && !qi->lqi_is_blk) ||
RETURN_EXIT;
/* We don't enforce quota until the qsd_instance is started */
- cfs_read_lock(&qsd->qsd_lock);
+ read_lock(&qsd->qsd_lock);
if (!qsd->qsd_started) {
- cfs_read_unlock(&qsd->qsd_lock);
+ read_unlock(&qsd->qsd_lock);
RETURN_EXIT;
}
- cfs_read_unlock(&qsd->qsd_lock);
+ read_unlock(&qsd->qsd_lock);
LASSERT(trans != NULL);
RETURN_EXIT;
/* We don't enforce quota until the qsd_instance is started */
- cfs_read_lock(&qsd->qsd_lock);
+ read_lock(&qsd->qsd_lock);
if (!qsd->qsd_started) {
- cfs_read_unlock(&qsd->qsd_lock);
+ read_unlock(&qsd->qsd_lock);
RETURN_EXIT;
}
- cfs_read_unlock(&qsd->qsd_lock);
+ read_unlock(&qsd->qsd_lock);
qqi = qsd->qsd_type_array[qtype];
LASSERT(qqi);
qid->qid_uid == 0)
RETURN_EXIT;
- cfs_read_lock(&qsd->qsd_lock);
+ read_lock(&qsd->qsd_lock);
if (!qsd->qsd_started) {
- cfs_read_unlock(&qsd->qsd_lock);
+ read_unlock(&qsd->qsd_lock);
RETURN_EXIT;
}
- cfs_read_unlock(&qsd->qsd_lock);
+ read_unlock(&qsd->qsd_lock);
lqe = lqe_locate(env, qqi->qqi_site, qid);
if (IS_ERR(lqe)) {
cfs_list_t qsd_adjust_list;
/* lock protecting adjust list */
- cfs_spinlock_t qsd_adjust_lock;
+ spinlock_t qsd_adjust_lock;
/* dedicated thread for updating slave index files. */
struct ptlrpc_thread qsd_upd_thread;
* - the qsd update list
* - the deferred list
* - flags of the qsd_qtype_info */
- cfs_rwlock_t qsd_lock;
+ rwlock_t qsd_lock;
/* Default quota settings which apply to all identifiers */
/* when blk qunit reaches this value, later write reqs from client
/* list of all qsd_instance for this fs */
cfs_list_t qfs_qsd_list;
- cfs_semaphore_t qfs_sem;
+ struct semaphore qfs_sem;
/* link to the global quota fsinfo list. */
cfs_list_t qfs_link;
LASSERT(qsd != NULL);
- cfs_write_lock(&qsd->qsd_lock);
+ write_lock(&qsd->qsd_lock);
if (qsd->qsd_stopping) {
/* don't mess up with shutdown procedure, it is already
* complicated enough */
qsd->qsd_type_array[qtype]->qqi_slv_uptodate = false;
}
}
- cfs_write_unlock(&qsd->qsd_lock);
+ write_unlock(&qsd->qsd_lock);
if (rc)
return rc;
ldlm_namespace_get(class_exp2obd(qsd->qsd_exp)->obd_namespace);
qsd->qsd_ns = class_exp2obd(qsd->qsd_exp)->obd_namespace;
- cfs_write_lock(&qsd->qsd_lock);
+ write_lock(&qsd->qsd_lock);
/* notify that qsd_exp is now valid */
qsd->qsd_exp_valid = true;
- cfs_write_unlock(&qsd->qsd_lock);
+ write_unlock(&qsd->qsd_lock);
/* Now that the connection to master is setup, we can initiate the
* reintegration procedure for quota types which are enabled.
RETURN_EXIT;
CDEBUG(D_QUOTA, "%s: initiating QSD shutdown\n", qsd->qsd_svname);
- cfs_write_lock(&qsd->qsd_lock);
+ write_lock(&qsd->qsd_lock);
qsd->qsd_stopping = true;
- cfs_write_unlock(&qsd->qsd_lock);
+ write_unlock(&qsd->qsd_lock);
/* remove from the list of fsinfo */
if (!cfs_list_empty(&qsd->qsd_link)) {
LASSERT(qsd->qsd_fsinfo != NULL);
- cfs_down(&qsd->qsd_fsinfo->qfs_sem);
+ down(&qsd->qsd_fsinfo->qfs_sem);
cfs_list_del_init(&qsd->qsd_link);
- cfs_up(&qsd->qsd_fsinfo->qfs_sem);
+ up(&qsd->qsd_fsinfo->qfs_sem);
}
/* remove qsd proc entry */
RETURN(ERR_PTR(-ENOMEM));
/* generic initializations */
- cfs_rwlock_init(&qsd->qsd_lock);
+ rwlock_init(&qsd->qsd_lock);
CFS_INIT_LIST_HEAD(&qsd->qsd_link);
thread_set_flags(&qsd->qsd_upd_thread, SVC_STOPPED);
cfs_waitq_init(&qsd->qsd_upd_thread.t_ctl_waitq);
CFS_INIT_LIST_HEAD(&qsd->qsd_upd_list);
- cfs_spin_lock_init(&qsd->qsd_adjust_lock);
+ spin_lock_init(&qsd->qsd_adjust_lock);
CFS_INIT_LIST_HEAD(&qsd->qsd_adjust_list);
qsd->qsd_prepared = false;
qsd->qsd_started = false;
}
/* add to the list of lquota_fsinfo */
- cfs_down(&qsd->qsd_fsinfo->qfs_sem);
+ down(&qsd->qsd_fsinfo->qfs_sem);
list_add_tail(&qsd->qsd_link, &qsd->qsd_fsinfo->qfs_qsd_list);
- cfs_up(&qsd->qsd_fsinfo->qfs_sem);
+ up(&qsd->qsd_fsinfo->qfs_sem);
/* register procfs directory */
qsd->qsd_proc = lprocfs_register(QSD_DIR, osd_proc,
if (unlikely(qsd == NULL))
RETURN(0);
- cfs_read_lock(&qsd->qsd_lock);
+ read_lock(&qsd->qsd_lock);
if (qsd->qsd_prepared) {
CERROR("%s: qsd instance already prepared\n", qsd->qsd_svname);
rc = -EALREADY;
}
- cfs_read_unlock(&qsd->qsd_lock);
+ read_unlock(&qsd->qsd_lock);
if (rc)
RETURN(rc);
}
/* pools successfully set up, mark the qsd as prepared */
- cfs_write_lock(&qsd->qsd_lock);
+ write_lock(&qsd->qsd_lock);
qsd->qsd_prepared = true;
- cfs_write_unlock(&qsd->qsd_lock);
+ write_unlock(&qsd->qsd_lock);
/* start reintegration thread for each type, if required */
for (qtype = USRQUOTA; qtype < MAXQUOTAS; qtype++) {
if (unlikely(qsd == NULL))
RETURN(0);
- cfs_write_lock(&qsd->qsd_lock);
+ write_lock(&qsd->qsd_lock);
if (!qsd->qsd_prepared) {
CERROR("%s: can't start qsd instance since it was properly "
"initialized\n", qsd->qsd_svname);
/* notify that the qsd_instance is now started */
qsd->qsd_started = true;
}
- cfs_write_unlock(&qsd->qsd_lock);
+ write_unlock(&qsd->qsd_lock);
if (rc)
RETURN(rc);
/* we are losing the global index lock, so let's mark the
* global & slave indexes as not up-to-date any more */
- cfs_write_lock(&qqi->qqi_qsd->qsd_lock);
+ write_lock(&qqi->qqi_qsd->qsd_lock);
qqi->qqi_glb_uptodate = false;
qqi->qqi_slv_uptodate = false;
if (lock->l_handle.h_cookie == qqi->qqi_lockh.cookie)
memset(&qqi->qqi_lockh, 0, sizeof(qqi->qqi_lockh));
- cfs_write_unlock(&qqi->qqi_qsd->qsd_lock);
+ write_unlock(&qqi->qqi_qsd->qsd_lock);
CDEBUG(D_QUOTA, "%s: losing global index lock for %s type\n",
qqi->qqi_qsd->qsd_svname, QTYPE_NAME((qqi->qqi_qtype)));
{
int connected;
- cfs_read_lock(&qsd->qsd_lock);
+ read_lock(&qsd->qsd_lock);
connected = qsd->qsd_exp_valid ? 1 : 0;
- cfs_read_unlock(&qsd->qsd_lock);
+ read_unlock(&qsd->qsd_lock);
return connected;
}
{
int started;
- cfs_read_lock(&qsd->qsd_lock);
+ read_lock(&qsd->qsd_lock);
started = qsd->qsd_started ? 1 : 0;
- cfs_read_unlock(&qsd->qsd_lock);
+ read_unlock(&qsd->qsd_lock);
return started;
}
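Again purely as an illustration of the converted API (not part of the patch): the hunks above follow a reader/writer pattern where readers sample a flag such as qsd_started or qsd_exp_valid under read_lock() and the single writer flips it under write_lock(). A self-contained sketch with placeholder names (flag_state, fs_started):

#include <linux/spinlock.h>
#include <linux/types.h>

struct flag_state {
	rwlock_t	fs_lock;	/* was cfs_rwlock_t */
	bool		fs_started;
};

/* readers sample the flag under read_lock() (was cfs_read_lock()) */
static int flag_state_started(struct flag_state *fs)
{
	int started;

	read_lock(&fs->fs_lock);
	started = fs->fs_started ? 1 : 0;
	read_unlock(&fs->fs_lock);
	return started;
}

/* the writer flips the flag under write_lock() (was cfs_write_lock()) */
static void flag_state_set_started(struct flag_state *fs)
{
	write_lock(&fs->fs_lock);
	fs->fs_started = true;
	write_unlock(&fs->fs_lock);
}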
memset(&qti->qti_lvb, 0, sizeof(qti->qti_lvb));
- cfs_read_lock(&qsd->qsd_lock);
+ read_lock(&qsd->qsd_lock);
/* check whether we already own a global quota lock for this type */
if (lustre_handle_is_used(&qqi->qqi_lockh) &&
ldlm_lock_addref_try(&qqi->qqi_lockh, qsd_glb_einfo.ei_mode) == 0) {
- cfs_read_unlock(&qsd->qsd_lock);
+ read_unlock(&qsd->qsd_lock);
/* force refresh of global & slave index copy */
qti->qti_lvb.l_lquota.lvb_glb_ver = ~0ULL;
qti->qti_slv_ver = ~0ULL;
} else {
/* no valid lock found, let's enqueue a new one */
- cfs_read_unlock(&qsd->qsd_lock);
+ read_unlock(&qsd->qsd_lock);
memset(&qti->qti_body, 0, sizeof(qti->qti_body));
memcpy(&qti->qti_body.qb_fid, &qqi->qqi_fid,
out_env:
OBD_FREE_PTR(env);
out:
- cfs_write_lock(&qsd->qsd_lock);
+ write_lock(&qsd->qsd_lock);
qqi->qqi_reint = 0;
- cfs_write_unlock(&qsd->qsd_lock);
+ write_unlock(&qsd->qsd_lock);
qqi_putref(qqi);
lu_ref_del(&qqi->qqi_reference, "reint_thread", thread);
ENTRY;
/* any pending quota adjust? */
- cfs_spin_lock(&qsd->qsd_adjust_lock);
+ spin_lock(&qsd->qsd_adjust_lock);
cfs_list_for_each_entry_safe(lqe, n, &qsd->qsd_adjust_list, lqe_link) {
if (lqe2qqi(lqe) == qqi) {
cfs_list_del_init(&lqe->lqe_link);
lqe_putref(lqe);
}
}
- cfs_spin_unlock(&qsd->qsd_adjust_lock);
+ spin_unlock(&qsd->qsd_adjust_lock);
/* any pending updates? */
- cfs_read_lock(&qsd->qsd_lock);
+ read_lock(&qsd->qsd_lock);
cfs_list_for_each_entry(upd, &qsd->qsd_upd_list, qur_link) {
if (upd->qur_qqi == qqi) {
- cfs_read_unlock(&qsd->qsd_lock);
+ read_unlock(&qsd->qsd_lock);
CDEBUG(D_QUOTA, "%s: pending %s updates for type:%d.\n",
qsd->qsd_svname,
upd->qur_global ? "global" : "slave",
GOTO(out, updates = true);
}
}
- cfs_read_unlock(&qsd->qsd_lock);
+ read_unlock(&qsd->qsd_lock);
/* any pending quota request? */
cfs_hash_for_each_safe(qqi->qqi_site->lqs_hash, qsd_entry_iter_cb,
RETURN(0);
/* check if the reintegration has already started or finished */
- cfs_write_lock(&qsd->qsd_lock);
+ write_lock(&qsd->qsd_lock);
if ((qqi->qqi_glb_uptodate && qqi->qqi_slv_uptodate) ||
qqi->qqi_reint || qsd->qsd_stopping) {
- cfs_write_unlock(&qsd->qsd_lock);
+ write_unlock(&qsd->qsd_lock);
RETURN(0);
}
qqi->qqi_reint = 1;
- cfs_write_unlock(&qsd->qsd_lock);
+ write_unlock(&qsd->qsd_lock);
/* there could be some unfinished global or index entry updates
* (very unlikely); to avoid them interfering with the reint
* procedure, just return and try to restart the reint later. */
if (qsd_pending_updates(qqi)) {
- cfs_write_lock(&qsd->qsd_lock);
+ write_lock(&qsd->qsd_lock);
qqi->qqi_reint = 0;
- cfs_write_unlock(&qsd->qsd_lock);
+ write_unlock(&qsd->qsd_lock);
RETURN(0);
}
rc = cfs_create_thread(qsd_reint_main, (void *)qqi, 0);
if (rc < 0) {
thread_set_flags(thread, SVC_STOPPED);
- cfs_write_lock(&qsd->qsd_lock);
+ write_lock(&qsd->qsd_lock);
qqi->qqi_reint = 0;
- cfs_write_unlock(&qsd->qsd_lock);
+ write_unlock(&qsd->qsd_lock);
RETURN(rc);
}
idx_ver = global ? &qqi->qqi_glb_ver : &qqi->qqi_slv_ver;
list = global ? &qqi->qqi_deferred_glb : &qqi->qqi_deferred_slv;
- cfs_write_lock(&qqi->qqi_qsd->qsd_lock);
+ write_lock(&qqi->qqi_qsd->qsd_lock);
*idx_ver = ver;
if (global)
qqi->qqi_glb_uptodate = 1;
else
qqi->qqi_slv_uptodate = 1;
qsd_kickoff_deferred(qqi, list, ver);
- cfs_write_unlock(&qqi->qqi_qsd->qsd_lock);
+ write_unlock(&qqi->qqi_qsd->qsd_lock);
}
/*
/* If we don't want to update the index version, there is no need to sort
* the records in version order; just schedule the updates immediately. */
if (ver == 0) {
- cfs_write_lock(&qsd->qsd_lock);
+ write_lock(&qsd->qsd_lock);
qsd_upd_add(qsd, upd);
- cfs_write_unlock(&qsd->qsd_lock);
+ write_unlock(&qsd->qsd_lock);
RETURN_EXIT;
}
- cfs_write_lock(&qsd->qsd_lock);
+ write_lock(&qsd->qsd_lock);
cur_ver = global ? qqi->qqi_glb_ver : qqi->qqi_slv_ver;
qsd_add_deferred(list, upd);
}
- cfs_write_unlock(&qsd->qsd_lock);
+ write_unlock(&qsd->qsd_lock);
EXIT;
}
bool added = false;
lqe_getref(lqe);
- cfs_spin_lock(&qsd->qsd_adjust_lock);
+ spin_lock(&qsd->qsd_adjust_lock);
/* the lqe is being queued for the per-ID lock cancel; we should
* cancel that request and re-add the lqe for quota adjust */
cfs_list_add(&lqe->lqe_link, &qsd->qsd_adjust_list);
added = true;
}
- cfs_spin_unlock(&qsd->qsd_adjust_lock);
+ spin_unlock(&qsd->qsd_adjust_lock);
if (added)
cfs_waitq_signal(&qsd->qsd_upd_thread.t_ctl_waitq);
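The hunk above shows the producer side of the update thread: queue an entry under the spinlock, then signal the worker. A hedged sketch of that add-and-wake pattern with the native API (work_queue and its fields are placeholders; the libcfs waitqueue wrappers such as cfs_waitq_signal() are not converted by this pass of the script, so wake_up() is shown only as their upstream counterpart):

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/types.h>

struct work_queue {
	spinlock_t		wq_lock;
	struct list_head	wq_list;
	wait_queue_head_t	wq_waitq;
};

static void work_queue_add(struct work_queue *wq, struct list_head *link)
{
	bool added = false;

	spin_lock(&wq->wq_lock);		/* was cfs_spin_lock() */
	if (list_empty(link)) {			/* not already queued */
		list_add(link, &wq->wq_list);
		added = true;
	}
	spin_unlock(&wq->wq_lock);		/* was cfs_spin_unlock() */

	if (added)
		wake_up(&wq->wq_waitq);		/* counterpart of cfs_waitq_signal() */
}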
LASSERT(cfs_list_empty(upd));
*uptodate = true;
- cfs_spin_lock(&qsd->qsd_adjust_lock);
+ spin_lock(&qsd->qsd_adjust_lock);
if (!cfs_list_empty(&qsd->qsd_adjust_list)) {
struct lquota_entry *lqe;
lqe = cfs_list_entry(qsd->qsd_adjust_list.next,
cfs_time_current_64()))
job_pending = true;
}
- cfs_spin_unlock(&qsd->qsd_adjust_lock);
+ spin_unlock(&qsd->qsd_adjust_lock);
- cfs_write_lock(&qsd->qsd_lock);
+ write_lock(&qsd->qsd_lock);
if (!cfs_list_empty(&qsd->qsd_upd_list)) {
cfs_list_splice_init(&qsd->qsd_upd_list, upd);
job_pending = true;
*uptodate = false;
}
- cfs_write_unlock(&qsd->qsd_lock);
+ write_unlock(&qsd->qsd_lock);
return job_pending;
}
qsd_upd_free(upd);
}
- cfs_spin_lock(&qsd->qsd_adjust_lock);
+ spin_lock(&qsd->qsd_adjust_lock);
cur_time = cfs_time_current_64();
cfs_list_for_each_entry_safe(lqe, tmp, &qsd->qsd_adjust_list,
lqe_link) {
break;
cfs_list_del_init(&lqe->lqe_link);
- cfs_spin_unlock(&qsd->qsd_adjust_lock);
+ spin_unlock(&qsd->qsd_adjust_lock);
if (thread_is_running(thread) && uptodate) {
qsd_refresh_usage(env, lqe);
}
lqe_putref(lqe);
- cfs_spin_lock(&qsd->qsd_adjust_lock);
+ spin_lock(&qsd->qsd_adjust_lock);
}
- cfs_spin_unlock(&qsd->qsd_adjust_lock);
+ spin_unlock(&qsd->qsd_adjust_lock);
if (!thread_is_running(thread))
break;
if (qqi == NULL)
continue;
- cfs_write_lock(&qsd->qsd_lock);
+ write_lock(&qsd->qsd_lock);
cfs_list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_glb,
qur_link) {
CWARN("%s: Free global deferred upd: ID:"LPU64", "
list_del_init(&upd->qur_link);
qsd_upd_free(upd);
}
- cfs_write_unlock(&qsd->qsd_lock);
+ write_unlock(&qsd->qsd_lock);
}
}
{
struct lquota_entry *lqe;
- cfs_spin_lock(&qsd->qsd_adjust_lock);
+ spin_lock(&qsd->qsd_adjust_lock);
while (!cfs_list_empty(&qsd->qsd_adjust_list)) {
lqe = cfs_list_entry(qsd->qsd_adjust_list.next,
struct lquota_entry, lqe_link);
cfs_list_del_init(&lqe->lqe_link);
lqe_putref(lqe);
}
- cfs_spin_unlock(&qsd->qsd_adjust_lock);
+ spin_unlock(&qsd->qsd_adjust_lock);
}
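The function above drains qsd_adjust_list under the adjust spinlock. A minimal self-contained sketch of that drain-under-spinlock pattern with the native list API; struct item and item_put() are placeholders, not Lustre symbols:

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head i_link;
};

static void item_put(struct item *it)
{
	kfree(it);			/* stand-in for lqe_putref() */
}

static void drain_items(spinlock_t *lock, struct list_head *head)
{
	struct item *it;

	spin_lock(lock);				/* was cfs_spin_lock() */
	while (!list_empty(head)) {
		it = list_entry(head->next, struct item, i_link);
		list_del_init(&it->i_link);		/* was cfs_list_del_init() */
		item_put(it);
	}
	spin_unlock(lock);				/* was cfs_spin_unlock() */
}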
void qsd_stop_upd_thread(struct qsd_instance *qsd)
return;
/* Clear bit when lcd is freed */
LASSERT(lut->lut_client_bitmap);
- if (!cfs_test_and_clear_bit(ted->ted_lr_idx, lut->lut_client_bitmap)) {
+ if (!test_and_clear_bit(ted->ted_lr_idx, lut->lut_client_bitmap)) {
CERROR("%s: client %u bit already clear in bitmap\n",
exp->exp_obd->obd_name, ted->ted_lr_idx);
LBUG();
/* can't add callback, do sync now */
th->th_sync = 1;
} else {
- cfs_spin_lock(&exp->exp_lock);
+ spin_lock(&exp->exp_lock);
exp->exp_need_sync = 1;
- cfs_spin_unlock(&exp->exp_lock);
+ spin_unlock(&exp->exp_lock);
}
tti->tti_off = ted->ted_lr_off;
tgt->lut_last_transno);
/* Always save latest transno to keep it fresh */
- cfs_spin_lock(&tgt->lut_translock);
+ spin_lock(&tgt->lut_translock);
tgt->lut_lsd.lsd_last_transno = tgt->lut_last_transno;
- cfs_spin_unlock(&tgt->lut_translock);
+ spin_unlock(&tgt->lut_translock);
th = dt_trans_create(env, tgt->lut_bottom);
if (IS_ERR(th))
return;
}
- cfs_spin_lock(&tgt->lut_translock);
+ spin_lock(&tgt->lut_translock);
start_epoch = lr_epoch(tgt->lut_last_transno) + 1;
tgt->lut_last_transno = (__u64)start_epoch << LR_EPOCH_BITS;
tgt->lut_lsd.lsd_start_epoch = start_epoch;
- cfs_spin_unlock(&tgt->lut_translock);
+ spin_unlock(&tgt->lut_translock);
CFS_INIT_LIST_HEAD(&client_list);
/**
* The recovery is not yet finished and the final queue can still be updated
* with resend requests. Move the final list to a separate one for processing
*/
- cfs_spin_lock(&tgt->lut_obd->obd_recovery_task_lock);
+ spin_lock(&tgt->lut_obd->obd_recovery_task_lock);
cfs_list_splice_init(&tgt->lut_obd->obd_final_req_queue, &client_list);
- cfs_spin_unlock(&tgt->lut_obd->obd_recovery_task_lock);
+ spin_unlock(&tgt->lut_obd->obd_recovery_task_lock);
/**
* go through the list of exports that participated in recovery and
tgt_client_epoch_update(&env, req->rq_export);
}
/** put the list back at once */
- cfs_spin_lock(&tgt->lut_obd->obd_recovery_task_lock);
+ spin_lock(&tgt->lut_obd->obd_recovery_task_lock);
cfs_list_splice_init(&client_list, &tgt->lut_obd->obd_final_req_queue);
- cfs_spin_unlock(&tgt->lut_obd->obd_recovery_task_lock);
+ spin_unlock(&tgt->lut_obd->obd_recovery_task_lock);
/** update server epoch */
tgt_server_data_update(&env, tgt, 1);
lu_env_fini(&env);
LASSERT(ccb->llcc_tgt != NULL);
LASSERT(ccb->llcc_exp->exp_obd == ccb->llcc_tgt->lut_obd);
- cfs_spin_lock(&ccb->llcc_tgt->lut_translock);
+ spin_lock(&ccb->llcc_tgt->lut_translock);
if (ccb->llcc_transno > ccb->llcc_tgt->lut_obd->obd_last_committed)
ccb->llcc_tgt->lut_obd->obd_last_committed = ccb->llcc_transno;
LASSERT(ccb->llcc_exp);
if (ccb->llcc_transno > ccb->llcc_exp->exp_last_committed) {
ccb->llcc_exp->exp_last_committed = ccb->llcc_transno;
- cfs_spin_unlock(&ccb->llcc_tgt->lut_translock);
+ spin_unlock(&ccb->llcc_tgt->lut_translock);
ptlrpc_commit_replies(ccb->llcc_exp);
} else {
- cfs_spin_unlock(&ccb->llcc_tgt->lut_translock);
+ spin_unlock(&ccb->llcc_tgt->lut_translock);
}
class_export_cb_put(ccb->llcc_exp);
if (ccb->llcc_transno)
ccb->lncc_exp->exp_obd->obd_name,
ccb->lncc_exp->exp_client_uuid.uuid);
- cfs_spin_lock(&ccb->lncc_exp->exp_lock);
+ spin_lock(&ccb->lncc_exp->exp_lock);
ccb->lncc_exp->exp_need_sync = 0;
- cfs_spin_unlock(&ccb->lncc_exp->exp_lock);
+ spin_unlock(&ccb->lncc_exp->exp_lock);
class_export_cb_put(ccb->lncc_exp);
OBD_FREE_PTR(ccb);
if (!strcmp(ted->ted_lcd->lcd_uuid, tgt->lut_obd->obd_uuid.uuid))
RETURN(0);
- cfs_mutex_init(&ted->ted_lcd_lock);
+ mutex_init(&ted->ted_lcd_lock);
if ((exp->exp_connect_flags & OBD_CONNECT_LIGHTWEIGHT) != 0)
RETURN(0);
/* the bitmap operations can handle cl_idx > sizeof(long) * 8, so
* there's no need for extra complication here
*/
- idx = cfs_find_first_zero_bit(tgt->lut_client_bitmap, LR_MAX_CLIENTS);
+ idx = find_first_zero_bit(tgt->lut_client_bitmap, LR_MAX_CLIENTS);
repeat:
if (idx >= LR_MAX_CLIENTS ||
OBD_FAIL_CHECK(OBD_FAIL_MDS_CLIENT_ADD)) {
tgt->lut_obd->obd_name, idx);
RETURN(-EOVERFLOW);
}
- if (cfs_test_and_set_bit(idx, tgt->lut_client_bitmap)) {
- idx = cfs_find_next_zero_bit(tgt->lut_client_bitmap,
+ if (test_and_set_bit(idx, tgt->lut_client_bitmap)) {
+ idx = find_next_zero_bit(tgt->lut_client_bitmap,
LR_MAX_CLIENTS, idx);
goto repeat;
}
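The converted slot search above relies on the stock bitmap helpers. A self-contained sketch of that find/test-and-set retry loop; MAX_SLOTS, slot_bitmap and the function names are illustrative only, not taken from the patch:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/types.h>

#define MAX_SLOTS 128

static DECLARE_BITMAP(slot_bitmap, MAX_SLOTS);

static int slot_alloc(void)
{
	int idx;

	/* was cfs_find_first_zero_bit() */
	idx = find_first_zero_bit(slot_bitmap, MAX_SLOTS);
	/* lost the race for a bit? look for the next free one
	 * (was cfs_test_and_set_bit()/cfs_find_next_zero_bit()) */
	while (idx < MAX_SLOTS && test_and_set_bit(idx, slot_bitmap))
		idx = find_next_zero_bit(slot_bitmap, MAX_SLOTS, idx);

	return idx < MAX_SLOTS ? idx : -EOVERFLOW;
}

static void slot_free(int idx)
{
	/* was cfs_test_and_clear_bit() */
	if (!test_and_clear_bit(idx, slot_bitmap))
		pr_err("slot %d was already free\n", idx);
}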
(exp->exp_connect_flags & OBD_CONNECT_LIGHTWEIGHT) != 0)
RETURN(0);
- if (cfs_test_and_set_bit(idx, tgt->lut_client_bitmap)) {
+ if (test_and_set_bit(idx, tgt->lut_client_bitmap)) {
CERROR("%s: client %d: bit already set in bitmap!!\n",
tgt->lut_obd->obd_name, idx);
LBUG();
ted->ted_lr_off = tgt->lut_lsd.lsd_client_start +
idx * tgt->lut_lsd.lsd_client_size;
- cfs_mutex_init(&ted->ted_lcd_lock);
+ mutex_init(&ted->ted_lcd_lock);
LASSERTF(ted->ted_lr_off > 0, "ted_lr_off = %llu\n", ted->ted_lr_off);
/* Clear the bit _after_ zeroing out the client so we don't
race with filter_client_add and zero out new clients. */
- if (!cfs_test_bit(ted->ted_lr_idx, tgt->lut_client_bitmap)) {
+ if (!test_bit(ted->ted_lr_idx, tgt->lut_client_bitmap)) {
CERROR("%s: client %u: bit already clear in bitmap!!\n",
tgt->lut_obd->obd_name, ted->ted_lr_idx);
LBUG();
RETURN(rc);
}
- cfs_mutex_lock(&ted->ted_lcd_lock);
+ mutex_lock(&ted->ted_lcd_lock);
memset(ted->ted_lcd->lcd_uuid, 0, sizeof ted->ted_lcd->lcd_uuid);
rc = tgt_client_data_update(env, exp);
- cfs_mutex_unlock(&ted->ted_lcd_lock);
+ mutex_unlock(&ted->ted_lcd_lock);
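The ted_lcd_lock conversion above reduces to the plain mutex API. A hedged sketch of that serialized record update; client_rec and record_write() are placeholders standing in for the per-client last_rcvd data and tgt_client_data_update(), not real symbols:

#include <linux/mutex.h>
#include <linux/string.h>

struct client_rec {
	struct mutex	cr_lock;	/* struct mutex replaces the libcfs mutex wrapper */
	char		cr_uuid[40];
};

static void client_rec_init(struct client_rec *cr)
{
	mutex_init(&cr->cr_lock);	/* was cfs_mutex_init() */
}

/* zero the uuid and write the record back, serialized per client */
static int client_rec_zero(struct client_rec *cr,
			   int (*record_write)(struct client_rec *))
{
	int rc;

	mutex_lock(&cr->cr_lock);	/* was cfs_mutex_lock() */
	memset(cr->cr_uuid, 0, sizeof(cr->cr_uuid));
	rc = record_write(cr);
	mutex_unlock(&cr->cr_lock);	/* was cfs_mutex_unlock() */
	return rc;
}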
CDEBUG(rc == 0 ? D_INFO : D_ERROR,
"%s: zeroing out client %s at idx %u (%llu), rc %d\n",
obd->u.obt.obt_lut = lut;
obd->u.obt.obt_magic = OBT_MAGIC;
- cfs_spin_lock_init(&lut->lut_translock);
+ spin_lock_init(&lut->lut_translock);
OBD_ALLOC(lut->lut_client_bitmap, LR_MAX_CLIENTS >> 3);
if (lut->lut_client_bitmap == NULL)