LU-1346 libcfs: replace libcfs wrappers with kernel API
author Andreas Dilger <adilger@whamcloud.com>
Tue, 4 Dec 2012 20:44:31 +0000 (13:44 -0700)
committer Oleg Drokin <green@whamcloud.com>
Wed, 5 Dec 2012 13:48:49 +0000 (08:48 -0500)
The libcfs kernel portability library had wrappers for many low-level
kernel primitives (locking, bit operations, etc.) that were simple
pass-throughs to the corresponding Linux kernel functions.  These
wrappers provide no value for Linux clients, and clients for other
kernels are not under development.

Remove the cfs_ prefix from these simple wrapper functions.  Any
future port to another kernel will need to emulate the Linux kernel
API instead.
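
For example, a caller of the spinlock wrappers is converted as follows
(illustrative snippet only; the tree-wide conversion is performed by
the build/libcfs_cleanup.sed script added below):

    /* before: libcfs wrappers */
    cfs_spinlock_t lock;
    cfs_spin_lock_init(&lock);
    cfs_spin_lock(&lock);
    cfs_spin_unlock(&lock);

    /* after: plain Linux kernel primitives */
    spinlock_t lock;
    spin_lock_init(&lock);
    spin_lock(&lock);
    spin_unlock(&lock);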

Affected primitives:
spinlock_t, spin_lock_init, spin_lock, spin_unlock, spin_lock_bh,
spin_lock_bh_init, spin_unlock_bh, spin_trylock, spin_is_locked,
spin_lock_irq, spin_unlock_irq, read_lock_irqsave, write_lock_irqsave,
read_unlock_irqrestore, write_unlock_irqrestore, spin_lock_irqsave,
spin_unlock_irqrestore, SPIN_LOCK_UNLOCKED

rw_semaphore, init_rwsem, down_read, down_read_trylock, up_read,
down_write, down_write_trylock, up_write, fini_rwsem, DECLARE_RWSEM

semaphore, rw_semaphore, init_completion_module, call_wait_handler,
wait_handler_t, mt_completion_t, mt_init_completion,
mt_wait_for_completion, mt_complete, mt_fini_completion, mt_atomic_t,
mt_atomic_read, mt_atomic_set, mt_atomic_dec_and_test, mt_atomic_inc,
mt_atomic_dec, mt_atomic_add, mt_atomic_sub

rwlock_t, rwlock_init, read_lock, read_unlock,
read_unlock_irqrestore, write_lock, write_unlock, write_lock_bh,
write_unlock_bh, RW_LOCK_UNLOCKED

completion_t, DECLARE_COMPLETION, INIT_COMPLETION, complete,
COMPLETION_INITIALIZER, init_completion, wait_for_completion,
wait_for_completion_interruptible, complete_and_exit, fini_completion

semaphore_t, DEFINE_SEMAPHORE, sema_init, up, down,
down_interruptible, down_trylock

mutex_t, DEFINE_MUTEX, mutex_init, mutex_lock, mutex_unlock,
mutex_lock_interruptible, mutex_trylock, mutex_is_locked,
mutex_destroy

lock_kernel, unlock_kernel

lock_class_key, lock_class_key_t, lockdep_set_class, lockdep_off,
lockdep_on, mutex_lock_nested, spin_lock_nested, down_read_nested,
down_write_nested

test_bit, set_bit, clear_bit, test_and_set_bit, test_and_clear_bit,
find_first_bit, find_first_zero_bit, find_next_bit,
find_next_zero_bit, ffz, ffs, fls

Change-Id: I36db204c703ed414504eaa9ba22e97ad7eb6cc2c
Signed-off-by: Liu Xuezhao <xuezhao.liu@emc.com>
Signed-off-by: Oleg Drokin <green@whamcloud.com>
Reviewed-on: http://review.whamcloud.com/2829
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
319 files changed:
build/libcfs_cleanup.sed [new file with mode: 0644]
libcfs/include/libcfs/bitmap.h
libcfs/include/libcfs/darwin/darwin-lock.h
libcfs/include/libcfs/libcfs_hash.h
libcfs/include/libcfs/libcfs_private.h
libcfs/include/libcfs/linux/linux-bitops.h
libcfs/include/libcfs/linux/linux-lock.h
libcfs/include/libcfs/linux/portals_compat25.h
libcfs/include/libcfs/lucache.h
libcfs/include/libcfs/params_tree.h
libcfs/include/libcfs/user-bitops.h
libcfs/include/libcfs/user-lock.h
libcfs/include/libcfs/winnt/portals_utils.h
libcfs/include/libcfs/winnt/winnt-fs.h
libcfs/include/libcfs/winnt/winnt-lock.h
libcfs/include/libcfs/winnt/winnt-mem.h
libcfs/include/libcfs/winnt/winnt-prim.h
libcfs/include/libcfs/winnt/winnt-tcpip.h
libcfs/libcfs/darwin/darwin-mem.c
libcfs/libcfs/fail.c
libcfs/libcfs/hash.c
libcfs/libcfs/kernel_user_comm.c
libcfs/libcfs/libcfs_lock.c
libcfs/libcfs/linux/linux-lwt.c
libcfs/libcfs/linux/linux-prim.c
libcfs/libcfs/linux/linux-tracefile.c
libcfs/libcfs/module.c
libcfs/libcfs/nidstrings.c
libcfs/libcfs/tracefile.c
libcfs/libcfs/tracefile.h
libcfs/libcfs/upcall_cache.c
libcfs/libcfs/user-bitops.c
libcfs/libcfs/user-lock.c
libcfs/libcfs/user-prim.c
libcfs/libcfs/watchdog.c
libcfs/libcfs/winnt/winnt-curproc.c
libcfs/libcfs/winnt/winnt-lock.c
libcfs/libcfs/winnt/winnt-mem.c
libcfs/libcfs/winnt/winnt-prim.c
libcfs/libcfs/winnt/winnt-proc.c
libcfs/libcfs/winnt/winnt-sync.c
libcfs/libcfs/winnt/winnt-tcpip.c
libcfs/libcfs/winnt/winnt-tracefile.c
libcfs/libcfs/workitem.c
lnet/include/lnet/lib-lnet.h
lnet/include/lnet/lib-types.h
lnet/klnds/mxlnd/mxlnd.c
lnet/klnds/mxlnd/mxlnd.h
lnet/klnds/mxlnd/mxlnd_cb.c
lnet/klnds/o2iblnd/o2iblnd.c
lnet/klnds/o2iblnd/o2iblnd.h
lnet/klnds/o2iblnd/o2iblnd_cb.c
lnet/klnds/ptllnd/ptllnd.c
lnet/klnds/ptllnd/ptllnd.h
lnet/klnds/ptllnd/ptllnd_cb.c
lnet/klnds/ptllnd/ptllnd_peer.c
lnet/klnds/ptllnd/ptllnd_rx_buf.c
lnet/klnds/ptllnd/ptllnd_tx.c
lnet/klnds/qswlnd/qswlnd.c
lnet/klnds/qswlnd/qswlnd.h
lnet/klnds/qswlnd/qswlnd_cb.c
lnet/klnds/ralnd/ralnd.c
lnet/klnds/ralnd/ralnd.h
lnet/klnds/ralnd/ralnd_cb.c
lnet/klnds/socklnd/socklnd.c
lnet/klnds/socklnd/socklnd.h
lnet/klnds/socklnd/socklnd_cb.c
lnet/klnds/socklnd/socklnd_lib-linux.c
lnet/klnds/socklnd/socklnd_lib-winnt.c
lnet/klnds/socklnd/socklnd_proto.c
lnet/lnet/acceptor.c
lnet/lnet/api-ni.c
lnet/lnet/config.c
lnet/lnet/lib-ptl.c
lnet/lnet/module.c
lnet/lnet/router.c
lnet/selftest/conctl.c
lnet/selftest/conrpc.c
lnet/selftest/console.c
lnet/selftest/console.h
lnet/selftest/framework.c
lnet/selftest/ping_test.c
lnet/selftest/rpc.c
lnet/selftest/selftest.h
lnet/selftest/timer.c
lnet/ulnds/socklnd/conn.c
lnet/ulnds/socklnd/poll.c
lnet/ulnds/socklnd/usocklnd.c
lnet/ulnds/socklnd/usocklnd.h
lustre/fid/fid_handler.c
lustre/fid/fid_request.c
lustre/fid/lproc_fid.c
lustre/fld/fld_cache.c
lustre/fld/fld_handler.c
lustre/fld/fld_internal.h
lustre/fld/fld_request.c
lustre/fld/lproc_fld.c
lustre/include/cl_object.h
lustre/include/dt_object.h
lustre/include/lclient.h
lustre/include/liblustre.h
lustre/include/linux/lustre_compat25.h
lustre/include/linux/lustre_fsfilt.h
lustre/include/linux/lustre_patchless_compat.h
lustre/include/linux/obd.h
lustre/include/lprocfs_status.h
lustre/include/lu_object.h
lustre/include/lu_ref.h
lustre/include/lu_target.h
lustre/include/lustre_capa.h
lustre/include/lustre_dlm.h
lustre/include/lustre_export.h
lustre/include/lustre_fid.h
lustre/include/lustre_fld.h
lustre/include/lustre_handles.h
lustre/include/lustre_idmap.h
lustre/include/lustre_import.h
lustre/include/lustre_lib.h
lustre/include/lustre_lite.h
lustre/include/lustre_log.h
lustre/include/lustre_mdc.h
lustre/include/lustre_net.h
lustre/include/lustre_sec.h
lustre/include/md_object.h
lustre/include/obd.h
lustre/include/obd_class.h
lustre/lclient/lcommon_cl.c
lustre/lclient/lcommon_misc.c
lustre/ldlm/l_lock.c
lustre/ldlm/ldlm_flock.c
lustre/ldlm/ldlm_internal.h
lustre/ldlm/ldlm_lib.c
lustre/ldlm/ldlm_lock.c
lustre/ldlm/ldlm_lockd.c
lustre/ldlm/ldlm_pool.c
lustre/ldlm/ldlm_request.c
lustre/ldlm/ldlm_resource.c
lustre/llite/dcache.c
lustre/llite/dir.c
lustre/llite/file.c
lustre/llite/llite_capa.c
lustre/llite/llite_close.c
lustre/llite/llite_internal.h
lustre/llite/llite_lib.c
lustre/llite/llite_mmap.c
lustre/llite/llite_nfs.c
lustre/llite/llite_rmtacl.c
lustre/llite/lloop.c
lustre/llite/lproc_llite.c
lustre/llite/remote_perm.c
lustre/llite/rw.c
lustre/llite/statahead.c
lustre/llite/vvp_dev.c
lustre/llite/xattr.c
lustre/lmv/lmv_internal.h
lustre/lmv/lmv_obd.c
lustre/lmv/lmv_object.c
lustre/lmv/lproc_lmv.c
lustre/lod/lod_dev.c
lustre/lod/lod_internal.h
lustre/lod/lod_lov.c
lustre/lod/lod_pool.c
lustre/lod/lod_qos.c
lustre/lod/lproc_lod.c
lustre/lov/lov_cl_internal.h
lustre/lov/lov_dev.c
lustre/lov/lov_ea.c
lustre/lov/lov_internal.h
lustre/lov/lov_io.c
lustre/lov/lov_obd.c
lustre/lov/lov_object.c
lustre/lov/lov_pack.c
lustre/lov/lov_pool.c
lustre/lov/lov_request.c
lustre/lov/lovsub_object.c
lustre/lvfs/fsfilt_ext3.c
lustre/lvfs/lvfs_lib.c
lustre/lvfs/lvfs_linux.c
lustre/mdc/mdc_locks.c
lustre/mdc/mdc_request.c
lustre/mdd/mdd_device.c
lustre/mdd/mdd_dir.c
lustre/mdd/mdd_internal.h
lustre/mdd/mdd_lfsck.c
lustre/mdd/mdd_lock.c
lustre/mdd/mdd_lproc.c
lustre/mdd/mdd_object.c
lustre/mdt/mdt_capa.c
lustre/mdt/mdt_handler.c
lustre/mdt/mdt_identity.c
lustre/mdt/mdt_idmap.c
lustre/mdt/mdt_internal.h
lustre/mdt/mdt_lib.c
lustre/mdt/mdt_lproc.c
lustre/mdt/mdt_open.c
lustre/mdt/mdt_recovery.c
lustre/mdt/mdt_reint.c
lustre/mgc/mgc_request.c
lustre/mgs/lproc_mgs.c
lustre/mgs/mgs_handler.c
lustre/mgs/mgs_internal.h
lustre/mgs/mgs_llog.c
lustre/mgs/mgs_nids.c
lustre/obdclass/capa.c
lustre/obdclass/cl_io.c
lustre/obdclass/cl_lock.c
lustre/obdclass/cl_object.c
lustre/obdclass/cl_page.c
lustre/obdclass/class_obd.c
lustre/obdclass/genops.c
lustre/obdclass/idmap.c
lustre/obdclass/linux/linux-module.c
lustre/obdclass/linux/linux-obdo.c
lustre/obdclass/llog.c
lustre/obdclass/llog_cat.c
lustre/obdclass/llog_internal.h
lustre/obdclass/llog_ioctl.c
lustre/obdclass/llog_lvfs.c
lustre/obdclass/llog_obd.c
lustre/obdclass/llog_osd.c
lustre/obdclass/local_storage.c
lustre/obdclass/local_storage.h
lustre/obdclass/lprocfs_jobstats.c
lustre/obdclass/lprocfs_status.c
lustre/obdclass/lu_object.c
lustre/obdclass/lu_ref.c
lustre/obdclass/lustre_handles.c
lustre/obdclass/lustre_peer.c
lustre/obdclass/md_local_object.c
lustre/obdclass/obd_config.c
lustre/obdclass/obd_mount.c
lustre/obdecho/echo.c
lustre/obdecho/echo_client.c
lustre/ofd/lproc_ofd.c
lustre/ofd/ofd_capa.c
lustre/ofd/ofd_dev.c
lustre/ofd/ofd_fmd.c
lustre/ofd/ofd_fs.c
lustre/ofd/ofd_grant.c
lustre/ofd/ofd_internal.h
lustre/ofd/ofd_obd.c
lustre/ofd/ofd_objects.c
lustre/ofd/ofd_trans.c
lustre/osc/osc_cache.c
lustre/osc/osc_cl_internal.h
lustre/osc/osc_dev.c
lustre/osc/osc_internal.h
lustre/osc/osc_io.c
lustre/osc/osc_lock.c
lustre/osc/osc_object.c
lustre/osc/osc_page.c
lustre/osc/osc_request.c
lustre/osd-ldiskfs/osd_compat.c
lustre/osd-ldiskfs/osd_handler.c
lustre/osd-ldiskfs/osd_iam.c
lustre/osd-ldiskfs/osd_iam.h
lustre/osd-ldiskfs/osd_internal.h
lustre/osd-ldiskfs/osd_io.c
lustre/osd-ldiskfs/osd_lproc.c
lustre/osd-ldiskfs/osd_oi.c
lustre/osd-ldiskfs/osd_scrub.c
lustre/osd-ldiskfs/osd_scrub.h
lustre/osd-zfs/osd_handler.c
lustre/osd-zfs/osd_internal.h
lustre/osd-zfs/osd_io.c
lustre/osd-zfs/osd_object.c
lustre/osd-zfs/osd_xattr.c
lustre/osd-zfs/udmu.c
lustre/osd-zfs/udmu.h
lustre/osp/osp_dev.c
lustre/osp/osp_internal.h
lustre/osp/osp_object.c
lustre/osp/osp_precreate.c
lustre/osp/osp_sync.c
lustre/ost/ost_handler.c
lustre/ptlrpc/client.c
lustre/ptlrpc/events.c
lustre/ptlrpc/gss/gss_cli_upcall.c
lustre/ptlrpc/gss/gss_internal.h
lustre/ptlrpc/gss/gss_keyring.c
lustre/ptlrpc/gss/gss_krb5_mech.c
lustre/ptlrpc/gss/gss_mech_switch.c
lustre/ptlrpc/gss/gss_pipefs.c
lustre/ptlrpc/gss/gss_svc_upcall.c
lustre/ptlrpc/gss/lproc_gss.c
lustre/ptlrpc/gss/sec_gss.c
lustre/ptlrpc/import.c
lustre/ptlrpc/llog_client.c
lustre/ptlrpc/llog_net.c
lustre/ptlrpc/lproc_ptlrpc.c
lustre/ptlrpc/niobuf.c
lustre/ptlrpc/pack_generic.c
lustre/ptlrpc/pinger.c
lustre/ptlrpc/ptlrpc_module.c
lustre/ptlrpc/ptlrpcd.c
lustre/ptlrpc/recov_thread.c
lustre/ptlrpc/recover.c
lustre/ptlrpc/sec.c
lustre/ptlrpc/sec_bulk.c
lustre/ptlrpc/sec_config.c
lustre/ptlrpc/sec_gc.c
lustre/ptlrpc/sec_null.c
lustre/ptlrpc/sec_plain.c
lustre/ptlrpc/service.c
lustre/quota/lquota_internal.h
lustre/quota/qmt_dev.c
lustre/quota/qmt_entry.c
lustre/quota/qmt_internal.h
lustre/quota/qmt_lock.c
lustre/quota/qsd_config.c
lustre/quota/qsd_entry.c
lustre/quota/qsd_handler.c
lustre/quota/qsd_internal.h
lustre/quota/qsd_lib.c
lustre/quota/qsd_lock.c
lustre/quota/qsd_reint.c
lustre/quota/qsd_writeback.c
lustre/target/tgt_lastrcvd.c
lustre/target/tgt_main.c

diff --git a/build/libcfs_cleanup.sed b/build/libcfs_cleanup.sed
new file mode 100644
index 0000000..8b11c19
--- /dev/null
@@ -0,0 +1,361 @@
+#!/bin/sed -f
+
+# Script to clean up libcfs macros; it runs against the tree at build time.
+# Migrate libcfs to emulate Linux kernel APIs.
+# http://jira.whamcloud.com/browse/LU-1346
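+#
+# Each conversion below is a pair of sed rules: an s/// command renames
+# the cfs_-prefixed symbol at every use, then a /.../d command deletes
+# the old compatibility #define, which the rename has just made
+# self-referential (e.g. "#define spin_lock(lock) spin_lock(lock)").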
+
+# remove extra blank line
+# /^$/{N;/^\n$/D}
+
+################################################################################
+# lock - spinlock, rw_semaphore, rwlock, completion, semaphore, mutex
+#      - lock_kernel, unlock_kernel, lockdep
+
+# spinlock
+/typedef  *spinlock_t  *cfs_spinlock_t;/d
+s/\bcfs_spinlock_t\b/spinlock_t/g
+s/\bcfs_spin_lock_init\b/spin_lock_init/g
+/#[ \t]*define[ \t]*\bspin_lock_init\b *( *\w* *)[ \t]*\bspin_lock_init\b *( *\w* *)/d
+s/\bcfs_spin_lock\b/spin_lock/g
+/#[ \t]*define[ \t]*\bspin_lock\b *( *\w* *)[ \t]*\bspin_lock\b *( *\w* *)/d
+s/\bcfs_spin_lock_bh\b/spin_lock_bh/g
+/#[ \t]*define[ \t]*\bspin_lock_bh\b *( *\w* *)[ \t]*\bspin_lock_bh\b *( *\w* *)/d
+s/\bcfs_spin_lock_bh_init\b/spin_lock_bh_init/g
+/#[ \t]*define[ \t]*\bspin_lock_bh_init\b *( *\w* *)[ \t]*\bspin_lock_bh_init\b *( *\w* *)/d
+s/\bcfs_spin_unlock\b/spin_unlock/g
+/#[ \t]*define[ \t]*\bspin_unlock\b *( *\w* *)[ \t]*\bspin_unlock\b *( *\w* *)/d
+s/\bcfs_spin_unlock_bh\b/spin_unlock_bh/g
+/#[ \t]*define[ \t]*\bspin_unlock_bh\b *( *\w* *)[ \t]*\bspin_unlock_bh\b *( *\w* *)/d
+s/\bcfs_spin_trylock\b/spin_trylock/g
+/#[ \t]*define[ \t]*\bspin_trylock\b *( *\w* *)[ \t]*\bspin_trylock\b *( *\w* *)/d
+s/\bcfs_spin_is_locked\b/spin_is_locked/g
+/#[ \t]*define[ \t]*\bspin_is_locked\b *( *\w* *)[ \t]*\bspin_is_locked\b *( *\w* *)/d
+
+s/\bcfs_spin_lock_irq\b/spin_lock_irq/g
+/#[ \t]*define[ \t]*\bspin_lock_irq\b *( *\w* *)[ \t]*\bspin_lock_irq\b *( *\w* *)/d
+s/\bcfs_spin_unlock_irq\b/spin_unlock_irq/g
+/#[ \t]*define[ \t]*\bspin_unlock_irq\b *( *\w* *)[ \t]*\bspin_unlock_irq\b *( *\w* *)/d
+s/\bcfs_read_lock_irqsave\b/read_lock_irqsave/g
+/#[ \t]*define[ \t]*\bread_lock_irqsave\b *( *\w* *, *\w* *)[ \t]*\bread_lock_irqsave\b *( *\w* *, *\w* *)/d
+s/\bcfs_write_lock_irqsave\b/write_lock_irqsave/g
+/#[ \t]*define[ \t]*\bwrite_lock_irqsave\b *( *\w* *, *\w* *)[ \t]*\bwrite_lock_irqsave\b *( *\w* *, *\w* *)/d
+s/\bcfs_write_unlock_irqrestore\b/write_unlock_irqrestore/g
+/#[ \t]*define[ \t]*\bwrite_unlock_irqrestore\b *( *\w* *, *\w* *)[ \t]*\bwrite_unlock_irqrestore\b *( *\w* *, *\w* *)/d
+s/\bcfs_spin_lock_irqsave\b/spin_lock_irqsave/g
+/#[ \t]*define[ \t]*\bspin_lock_irqsave\b *( *\w* *, *\w* *)[ \t]*\bspin_lock_irqsave\b *( *\w* *, *\w* *)/d
+s/\bcfs_spin_unlock_irqrestore\b/spin_unlock_irqrestore/g
+/#[ \t]*define[ \t]*\bspin_unlock_irqrestore\b *( *\w* *, *\w* *)[ \t]*\bspin_unlock_irqrestore\b *( *\w* *, *\w* *)/d
+s/\bCFS_SPIN_LOCK_UNLOCKED\b/SPIN_LOCK_UNLOCKED/g
+/#[ \t]*define[ \t]*\bSPIN_LOCK_UNLOCKED\b[ \t]*\bSPIN_LOCK_UNLOCKED\b/d
+
+# rw_semaphore
+s/\bcfs_rw_semaphore_t\b/struct rw_semaphore/g
+s/\bcfs_init_rwsem\b/init_rwsem/g
+/#[ \t]*define[ \t]*\binit_rwsem\b *( *\w* *)[ \t]*\binit_rwsem\b *( *\w* *)/d
+s/\bcfs_down_read\b/down_read/g
+/#[ \t]*define[ \t]*\bdown_read\b *( *\w* *)[ \t]*\bdown_read\b *( *\w* *)/d
+s/\bcfs_down_read_trylock\b/down_read_trylock/g
+/#[ \t]*define[ \t]*\bdown_read_trylock\b *( *\w* *)[ \t]*\bdown_read_trylock\b *( *\w* *)/d
+s/\bcfs_up_read\b/up_read/g
+/#[ \t]*define[ \t]*\bup_read\b *( *\w* *)[ \t]*\bup_read\b *( *\w* *)/d
+s/\bcfs_down_write\b/down_write/g
+/#[ \t]*define[ \t]*\bdown_write\b *( *\w* *)[ \t]*\bdown_write\b *( *\w* *)/d
+s/\bcfs_down_write_trylock\b/down_write_trylock/g
+/#[ \t]*define[ \t]*\bdown_write_trylock\b *( *\w* *)[ \t]*\bdown_write_trylock\b *( *\w* *)/d
+s/\bcfs_up_write\b/up_write/g
+/#[ \t]*define[ \t]*\bup_write\b *( *\w* *)[ \t]*\bup_write\b *( *\w* *)/d
+s/\bcfs_fini_rwsem\b/fini_rwsem/g
+s/\bCFS_DECLARE_RWSEM\b/DECLARE_RWSEM/g
+/#[ \t]*define[ \t]*\bDECLARE_RWSEM\b *( *\w* *)[ \t]*\bDECLARE_RWSEM\b *( *\w* *)/d
+
+s/\bcfs_semaphore\b/semaphore/g
+s/\bcfs_rw_semaphore\b/rw_semaphore/g
+s/\bcfs_init_completion_module\b/init_completion_module/g
+s/\bcfs_call_wait_handler\b/call_wait_handler/g
+s/\bcfs_wait_handler_t\b/wait_handler_t/g
+s/\bcfs_mt_completion_t\b/mt_completion_t/g
+s/\bcfs_mt_init_completion\b/mt_init_completion/g
+s/\bcfs_mt_wait_for_completion\b/mt_wait_for_completion/g
+s/\bcfs_mt_complete\b/mt_complete/g
+s/\bcfs_mt_fini_completion\b/mt_fini_completion/g
+s/\bcfs_mt_atomic_t\b/mt_atomic_t/g
+s/\bcfs_mt_atomic_read\b/mt_atomic_read/g
+s/\bcfs_mt_atomic_set\b/mt_atomic_set/g
+s/\bcfs_mt_atomic_dec_and_test\b/mt_atomic_dec_and_test/g
+s/\bcfs_mt_atomic_inc\b/mt_atomic_inc/g
+s/\bcfs_mt_atomic_dec\b/mt_atomic_dec/g
+s/\bcfs_mt_atomic_add\b/mt_atomic_add/g
+s/\bcfs_mt_atomic_sub\b/mt_atomic_sub/g
+
+# rwlock
+/typedef  *rwlock_t  *cfs_rwlock_t;/d
+s/\bcfs_rwlock_t\b/rwlock_t/g
+s/\bcfs_rwlock_init\b/rwlock_init/g
+/#[ \t]*define[ \t]*\brwlock_init\b *( *\w* *)[ \t]*\brwlock_init\b *( *\w* *)/d
+s/\bcfs_read_lock\b/read_lock/g
+/#[ \t]*define[ \t]*\bread_lock\b *( *\w* *)[ \t]*\bread_lock\b *( *\w* *)/d
+s/\bcfs_read_unlock\b/read_unlock/g
+/#[ \t]*define[ \t]*\bread_unlock\b *( *\w* *)[ \t]*\bread_unlock\b *( *\w* *)/d
+s/\bcfs_read_unlock_irqrestore\b/read_unlock_irqrestore/g
+#/#[ \t]*define[ \t]*\bread_unlock_irqrestore\b *( *\w* *)[ \t]*\bread_unlock_irqrestore\b *( *\w* *)/d
+/#define read_unlock_irqrestore(lock,flags) \\/{N;d}
+s/\bcfs_write_lock\b/write_lock/g
+/#[ \t]*define[ \t]*\bwrite_lock\b *( *\w* *)[ \t]*\bwrite_lock\b *( *\w* *)/d
+s/\bcfs_write_unlock\b/write_unlock/g
+/#[ \t]*define[ \t]*\bwrite_unlock\b *( *\w* *)[ \t]*\bwrite_unlock\b *( *\w* *)/d
+s/\bcfs_write_lock_bh\b/write_lock_bh/g
+/#[ \t]*define[ \t]*\bwrite_lock_bh\b *( *\w* *)[ \t]*\bwrite_lock_bh\b *( *\w* *)/d
+s/\bcfs_write_unlock_bh\b/write_unlock_bh/g
+/#[ \t]*define[ \t]*\bwrite_unlock_bh\b *( *\w* *)[ \t]*\bwrite_unlock_bh\b *( *\w* *)/d
+s/\bCFS_RW_LOCK_UNLOCKED\b/RW_LOCK_UNLOCKED/g
+/#[ \t]*define[ \t]*\bRW_LOCK_UNLOCKED\b  *\bRW_LOCK_UNLOCKED\b */d
+
+# completion
+s/\bcfs_completion_t\b/struct completion/g
+s/\bCFS_DECLARE_COMPLETION\b/DECLARE_COMPLETION/g
+/#[ \t]*define[ \t]*\bDECLARE_COMPLETION\b *( *\w* *)[ \t]*\bDECLARE_COMPLETION\b *( *\w* *)/d
+s/\bCFS_INIT_COMPLETION\b/INIT_COMPLETION/g
+/#[ \t]*define[ \t]*\bINIT_COMPLETION\b *( *\w* *)[ \t]*\bINIT_COMPLETION\b *( *\w* *)/d
+s/\bCFS_COMPLETION_INITIALIZER\b/COMPLETION_INITIALIZER/g
+/#[ \t]*define[ \t]*\bCOMPLETION_INITIALIZER\b *( *\w* *)[ \t]*\bCOMPLETION_INITIALIZER\b *( *\w* *)/d
+s/\bcfs_init_completion\b/init_completion/g
+/#[ \t]*define[ \t]*\binit_completion\b *( *\w* *)[ \t]*\binit_completion\b *( *\w* *)/d
+s/\bcfs_complete\b/complete/g
+/#[ \t]*define[ \t]*\bcomplete\b *( *\w* *)[ \t]*\bcomplete\b *( *\w* *)/d
+s/\bcfs_wait_for_completion\b/wait_for_completion/g
+/#[ \t]*define[ \t]*\bwait_for_completion\b *( *\w* *)[ \t]*\bwait_for_completion\b *( *\w* *)/d
+s/\bcfs_wait_for_completion_interruptible\b/wait_for_completion_interruptible/g
+/#define wait_for_completion_interruptible(c) \\/{N;d}
+s/\bcfs_complete_and_exit\b/complete_and_exit/g
+/#[ \t]*define[ \t]*\bcomplete_and_exit\b *( *\w* *, *\w* *)[ \t]*\bcomplete_and_exit\b *( *\w* *, *\w* *)/d
+s/\bcfs_fini_completion\b/fini_completion/g
+
+# semaphore
+s/\bcfs_semaphore_t\b/struct semaphore/g
+s/\bCFS_DEFINE_SEMAPHORE\b/DEFINE_SEMAPHORE/g
+/#[ \t]*define[ \t]*\bDEFINE_SEMAPHORE\b *( *\w* *)[ \t]*\bDEFINE_SEMAPHORE\b *( *\w* *)/d
+s/\bcfs_sema_init\b/sema_init/g
+/#[ \t]*define[ \t]*\bsema_init\b *( *\w* *, *\w* *)[ \t]*\bsema_init\b *( *\w* *, *\w* *)/d
+s/\bcfs_up\b/up/g
+/#[ \t]*define[ \t]*\bup\b *( *\w* *)[ \t]*\bup\b *( *\w* *)/d
+s/\bcfs_down\b/down/g
+/#[ \t]*define[ \t]*\bdown\b *( *\w* *)[ \t]*\bdown\b *( *\w* *)/d
+s/\bcfs_down_interruptible\b/down_interruptible/g
+/#[ \t]*define[ \t]*\bdown_interruptible\b *( *\w* *)[ \t]*\bdown_interruptible\b *( *\w* *)/d
+s/\bcfs_down_trylock\b/down_trylock/g
+/#[ \t]*define[ \t]*\bdown_trylock\b *( *\w* *)[ \t]*\bdown_trylock\b *( *\w* *)/d
+
+# mutex
+s/\bcfs_mutex_t\b/struct mutex/g
+s/\bCFS_DEFINE_MUTEX\b/DEFINE_MUTEX/g
+/#[ \t]*define[ \t]*\bDEFINE_MUTEX\b *( *name *)[ \t]*\bDEFINE_MUTEX\b *( *name *)/d
+s/\bcfs_mutex_init\b/mutex_init/g
+/#[ \t]*define[ \t]*\bmutex_init\b *( *\w* *)[ \t]*\bmutex_init\b *( *\w* *)/d
+s/\bcfs_mutex_lock\b/mutex_lock/g
+/#[ \t]*define[ \t]*\bmutex_lock\b *( *\w* *)[ \t]*\bmutex_lock\b *( *\w* *)/d
+s/\bcfs_mutex_unlock\b/mutex_unlock/g
+/#[ \t]*define[ \t]*\bmutex_unlock\b *( *\w* *)[ \t]*\bmutex_unlock\b *( *\w* *)/d
+s/\bcfs_mutex_lock_interruptible\b/mutex_lock_interruptible/g
+/#[ \t]*define[ \t]*\bmutex_lock_interruptible\b *( *\w* *)[ \t]*\bmutex_lock_interruptible\b *( *\w* *)/d
+s/\bcfs_mutex_trylock\b/mutex_trylock/g
+/#[ \t]*define[ \t]*\bmutex_trylock\b *( *\w* *)[ \t]*\bmutex_trylock\b *( *\w* *)/d
+s/\bcfs_mutex_is_locked\b/mutex_is_locked/g
+/#[ \t]*define[ \t]*\bmutex_is_locked\b *( *\w* *)[ \t]*\bmutex_is_locked\b *( *\w* *)/d
+s/\bcfs_mutex_destroy\b/mutex_destroy/g
+/#[ \t]*define[ \t]*\bmutex_destroy\b *( *\w* *)[ \t]*\bmutex_destroy\b *( *\w* *)/d
+
+# lock_kernel, unlock_kernel
+# s/\bcfs_lock_kernel\b/lock_kernel/g
+# /#[ \t]*define[ \t]*\block_kernel\b *( *)[ \t]*\block_kernel\b *( *)/d
+# s/\bcfs_unlock_kernel\b/unlock_kernel/g
+# /#[ \t]*define[ \t]*\bunlock_kernel\b *( *)[ \t]*\bunlock_kernel\b *( *)/d
+
+# lockdep
+s/\bcfs_lock_class_key\b/lock_class_key/g
+s/\bcfs_lock_class_key_t\b/struct lock_class_key/g
+s/\bcfs_lockdep_set_class\b/lockdep_set_class/g
+s/\bcfs_lockdep_off\b/lockdep_off/g
+s/\bcfs_lockdep_on\b/lockdep_on/g
+/#[ \t]*define[ \t]*\blockdep_off\b *( *)[ \t]*\blockdep_off\b *( *)/d
+/#[ \t]*define[ \t]*\blockdep_on\b *( *)[ \t]*\blockdep_on\b *( *)/d
+/#[ \t]*define[ \t]*\blockdep_set_class\b *( *\w* *, *\w* *)[ \t]*\blockdep_set_class\b *( *\w* *, *\w* *)/d
+
+s/\bcfs_mutex_lock_nested\b/mutex_lock_nested/g
+#/#[ \t]*define[ \t]*\bmutex_lock_nested\b *( *\w* *, *\w* *)[ \t]*\bmutex_lock_nested\b *( *\w* *, *\w* *)/d
+/#define mutex_lock_nested(mutex, subclass) \\/{N;d}
+s/\bcfs_spin_lock_nested\b/spin_lock_nested/g
+/#[ \t]*define[ \t]*\bspin_lock_nested\b *( *\w* *, *\w* *)[ \t]*\bspin_lock_nested\b *( *\w* *, *\w* *)/d
+s/\bcfs_down_read_nested\b/down_read_nested/g
+/#[ \t]*define[ \t]*\bdown_read_nested\b *( *\w* *, *\w* *)[ \t]*\bdown_read_nested\b *( *\w* *, *\w* *)/d
+s/\bcfs_down_write_nested\b/down_write_nested/g
+/#[ \t]*define[ \t]*\bdown_write_nested\b *( *\w* *, *\w* *)[ \t]*\bdown_write_nested\b *( *\w* *, *\w* *)/d
+
+###############################################################################
+# bitops
+
+s/\bcfs_test_bit\b/test_bit/g
+/#[ \t]*define[ \t]*\btest_bit\b *( *\w* *, *\w* *)[ \t]*\btest_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_set_bit\b/set_bit/g
+/#[ \t]*define[ \t]*\bset_bit\b *( *\w* *, *\w* *)[ \t]*\bset_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_clear_bit\b/clear_bit/g
+/#[ \t]*define[ \t]*\bclear_bit\b *( *\w* *, *\w* *)[ \t]*\bclear_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_test_and_set_bit\b/test_and_set_bit/g
+/#[ \t]*define[ \t]*\btest_and_set_bit\b *( *\w* *, *\w* *)[ \t]*\btest_and_set_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_test_and_clear_bit\b/test_and_clear_bit/g
+/#[ \t]*define[ \t]*\btest_and_clear_bit\b *( *\w* *, *\w* *)[ \t]*\btest_and_clear_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_find_first_bit\b/find_first_bit/g
+/#[ \t]*define[ \t]*\bfind_first_bit\b *( *\w* *, *\w* *)[ \t]*\bfind_first_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_find_first_zero_bit\b/find_first_zero_bit/g
+/#[ \t]*define[ \t]*\bfind_first_zero_bit\b *( *\w* *, *\w* *)[ \t]*\bfind_first_zero_bit\b *( *\w* *, *\w* *)/d
+s/\bcfs_find_next_bit\b/find_next_bit/g
+/#[ \t]*define[ \t]*\bfind_next_bit\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bfind_next_bit\b *( *\w* *, *\w* *, *\w* *)/d
+s/\bcfs_find_next_zero_bit\b/find_next_zero_bit/g
+/#define find_next_zero_bit(addr, size, off) \\/{N;d}
+s/\bcfs_ffz\b/ffz/g
+/#[ \t]*define[ \t]*\bffz\b *( *\w* *)[ \t]*\bffz\b *( *\w* *)/d
+s/\bcfs_ffs\b/ffs/g
+/#[ \t]*define[ \t]*\bffs\b *( *\w* *)[ \t]*\bffs\b *( *\w* *)/d
+s/\bcfs_fls\b/fls/g
+/#[ \t]*define[ \t]*\bfls\b *( *\w* *)[ \t]*\bfls\b *( *\w* *)/d
+
+################################################################################
+# file operations
+
+#s/\bcfs_file_t\b/file_t/g
+#s/\bcfs_dentry_t\b/dentry_t/g
+#s/\bcfs_dirent_t\b/dirent_t/g
+#s/\bcfs_kstatfs_t\b/kstatfs_t/g
+#s/\bcfs_filp_size\b/filp_size/g
+#s/\bcfs_filp_poff\b/filp_poff/g
+#s/\bcfs_filp_open\b/filp_open/g
+#/#[ \t]*define[ \t]*\bfilp_open\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bfilp_open\b *( *\w* *, *\w* *, *\w* *)/d
+#s/\bcfs_do_fsync\b/do_fsync/g
+#s/\bcfs_filp_close\b/filp_close/g
+#/#[ \t]*define[ \t]*\bfilp_close\b *( *\w* *, *\w* *)[ \t]*\bfilp_close\b *( *\w* *, *\w* *)/d
+#s/\bcfs_filp_read\b/filp_read/g
+#s/\bcfs_filp_write\b/filp_write/g
+#s/\bcfs_filp_fsync\b/filp_fsync/g
+#s/\bcfs_get_file\b/get_file/g
+#/#[ \t]*define[ \t]*\bget_file\b *( *\w* *)[ \t]*\bget_file\b *( *\w* *)/d
+#s/\bcfs_get_fd\b/fget/g
+#/#[ \t]*define[ \t]*\bfget\b *( *\w* *)[ \t]*\bfget\b *( *\w* *)/d
+#s/\bcfs_put_file\b/fput/g
+#/#[ \t]*define[ \t]*\bfput\b *( *\w* *)[ \t]*\bfput\b *( *\w* *)/d
+#s/\bcfs_file_count\b/file_count/g
+#/#[ \t]*define[ \t]*\bfile_count\b *( *\w* *)[ \t]*\bfile_count\b *( *\w* *)/d
+#s/\bCFS_INT_LIMIT\b/INT_LIMIT/g
+#s/\bCFS_OFFSET_MAX\b/OFFSET_MAX/g
+#s/\bcfs_flock_t\b/flock_t/g
+#s/\bcfs_flock_type\b/flock_type/g
+#s/\bcfs_flock_set_type\b/flock_set_type/g
+#s/\bcfs_flock_pid\b/flock_pid/g
+#s/\bcfs_flock_set_pid\b/flock_set_pid/g
+#s/\bcfs_flock_start\b/flock_start/g
+#s/\bcfs_flock_set_start\b/flock_set_start/g
+#s/\bcfs_flock_end\b/flock_end/g
+#s/\bcfs_flock_set_end\b/flock_set_end/g
+#s/\bcfs_user_write\b/user_write/g
+#s/\bCFS_IFSHIFT\b/IFSHIFT/g
+#s/\bCFS_IFTODT\b/IFTODT/g
+#s/\bCFS_DTTOIF\b/DTTOIF/g
+
+################################################################################
+# memory operations
+
+#s/\bcfs_page_t\b/page_t/g
+#s/\bCFS_PAGE_SIZE\b/PAGE_CACHE_SIZE/g
+#/#[ \t]*define[ \t]*\bPAGE_CACHE_SIZE\b[ \t]*\bPAGE_CACHE_SIZE\b/d
+#s/\bCFS_PAGE_SHIFT\b/PAGE_CACHE_SHIFT/g
+#/#[ \t]*define[ \t]*\bPAGE_CACHE_SHIFT\b[ \t]*\bPAGE_CACHE_SHIFT\b/d
+#s/\bCFS_PAGE_MASK\b/PAGE_CACHE_MASK/g
+#/#[ \t]*define[ \t]*\bPAGE_CACHE_MASK\b[ \t]*\bPAGE_CACHE_MASK\b/d
+#s/\bcfs_num_physpages\b/num_physpages/g
+#/#[ \t]*define[ \t]*\bnum_physpages\b[ \t]*\bnum_physpages\b/d
+#s/\bcfs_copy_from_user\b/copy_from_user/g
+#/#[ \t]*define[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_from_user\b *( *\w* *, *\w* *, *\w* *)/d
+#s/\bcfs_copy_to_user\b/copy_to_user/g
+#/#[ \t]*define[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)[ \t]*\bcopy_to_user\b *( *\w* *, *\w* *, *\w* *)/d
+#s/\bcfs_page_address\b/page_address/g
+#/#[ \t]*define[ \t]*\bpage_address\b *( *\w* *)[ \t]*\bpage_address\b *( *\w* *)/d
+#s/\bcfs_kmap\b/kmap/g
+#/#[ \t]*define[ \t]*\bkmap\b *( *\w* *)[ \t]*\bkmap\b *( *\w* *)/d
+#s/\bcfs_kunmap\b/kunmap/g
+#/#[ \t]*define[ \t]*\bkunmap\b *( *\w* *)[ \t]*\bkunmap\b *( *\w* *)/d
+#s/\bcfs_get_page\b/get_page/g
+#/#[ \t]*define[ \t]*\bget_page\b *( *\w* *)[ \t]*\bget_page\b *( *\w* *)/d
+#s/\bcfs_page_count\b/page_count/g
+#/#[ \t]*define[ \t]*\bpage_count\b *( *\w* *)[ \t]*\bpage_count\b *( *\w* *)/d
+#s/\bcfs_page_index\b/page_index/g
+#/#[ \t]*define[ \t]*\bpage_index\b *( *\w* *)[ \t]*\bpage_index\b *( *\w* *)/d
+#s/\bcfs_page_pin\b/page_cache_get/g
+#/#[ \t]*define[ \t]*\bpage_cache_get\b *( *\w* *)[ \t]*\bpage_cache_get\b *( *\w* *)/d
+#s/\bcfs_page_unpin\b/page_cache_release/g
+#/#[ \t]*define[ \t]*\bpage_cache_release\b *( *\w* *)[ \t]*\bpage_cache_release\b *( *\w* *)/d
+#s/\bcfs_memory_pressure_get\b/memory_pressure_get/g
+#s/\bcfs_memory_pressure_set\b/memory_pressure_set/g
+#s/\bcfs_memory_pressure_clr\b/memory_pressure_clr/g
+#s/\bCFS_NUM_CACHEPAGES\b/NUM_CACHEPAGES/g
+# memory allocator
+#s/\bCFS_ALLOC_ATOMIC\b/GFP_ATOMIC/g
+#/#[ \t]*define[ \t]*\bGFP_ATOMIC\b[ \t]*\bGFP_ATOMIC\b/d
+#s/\bCFS_ALLOC_WAIT\b/__GFP_WAIT/g
+#/#[ \t]*define[ \t]*\b__GFP_WAIT\b[ \t]*\b__GFP_WAIT\b/d
+#s/\bCFS_ALLOC_ZERO\b/__GFP_ZERO/g
+#/#[ \t]*define[ \t]*\b__GFP_ZERO\b[ \t]*\b__GFP_ZERO\b/d
+#s/\bCFS_ALLOC_FS\b/__GFP_FS/g
+#/#[ \t]*define[ \t]*\b__GFP_FS\b[ \t]*\b__GFP_FS\b/d
+#s/\bCFS_ALLOC_IO\b/__GFP_IO/g
+#/#[ \t]*define[ \t]*\b__GFP_IO\b[ \t]*\b__GFP_IO\b/d
+#s/\bCFS_ALLOC_NOWARN\b/__GFP_NOWARN/g
+#/#[ \t]*define[ \t]*\b__GFP_NOWARN\b[ \t]*\b__GFP_NOWARN\b/d
+#s/\bCFS_ALLOC_STD\b/GFP_IOFS/g
+#/#[ \t]*define[ \t]*\bGFP_IOFS\b[ \t]*\bGFP_IOFS\b/d
+#s/\bCFS_ALLOC_USER\b/GFP_KERNEL/g
+#/#[ \t]*define[ \t]*\bGFP_KERNEL\b[ \t]*\bGFP_KERNEL\b/d
+#s/\bCFS_ALLOC_HIGHMEM\b/__GFP_HIGHMEM/g
+#/#[ \t]*define[ \t]*\b__GFP_HIGHMEM\b[ \t]*\b__GFP_HIGHMEM\b/d
+#s/\bCFS_ALLOC_HIGHUSER\b/GFP_HIGHUSER/g
+#/#[ \t]*define[ \t]*\bGFP_HIGHUSER\b[ \t]*\bGFP_HIGHUSER\b/d
+#s/\bCFS_ALLOC_ATOMIC_TRY\b/ALLOC_ATOMIC_TRY/g
+#s/\bcfs_alloc\b/kmalloc/g
+#/#[ \t]*define[ \t]*\bkmalloc\b *( *\w* *, *\w* *)[ \t]*\bkmalloc\b *( *\w* *, *\w* *)/d
+#s/\bcfs_free\b/kfree/g
+#/#[ \t]*define[ \t]*\bkfree\b *( *\w* *)[ \t]*\bkfree\b *( *\w* *)/d
+#s/\bcfs_alloc_large\b/vmalloc/g
+#/#[ \t]*define[ \t]*\bvmalloc\b *( *\w* *)[ \t]*\bvmalloc\b *( *\w* *)/d
+#s/\bcfs_free_large\b/vfree/g
+#/#[ \t]*define[ \t]*\bvfree\b *( *\w* *)[ \t]*\bvfree\b *( *\w* *)/d
+#s/\bcfs_alloc_page\b/alloc_page/g
+#/#[ \t]*define[ \t]*\balloc_page\b *( *\w* *)[ \t]*\balloc_page\b *( *\w* *)/d
+#s/\bcfs_free_page\b/__free_page/g
+#/#[ \t]*define[ \t]*\b__free_page\b *( *\w* *)[ \t]*\b__free_page\b *( *\w* *)/d
+# TODO: SLAB allocator
+#s/\bCFS_DECL_MMSPACE\b/DECL_MMSPACE/g
+#s/\bCFS_MMSPACE_OPEN\b/MMSPACE_OPEN/g
+#s/\bCFS_MMSPACE_CLOSE\b/MMSPACE_CLOSE/g
+#s/\bCFS_SLAB_HWCACHE_ALIGN\b/SLAB_HWCACHE_ALIGN/g
+#/#[ \t]*define[ \t]*\bSLAB_HWCACHE_ALIGN\b[ \t]*\bSLAB_HWCACHE_ALIGN\b/d
+#s/\bCFS_SLAB_KERNEL\b/SLAB_KERNEL/g
+#/#[ \t]*define[ \t]*\bSLAB_KERNEL\b[ \t]*\bSLAB_KERNEL\b/d
+#s/\bCFS_SLAB_NOFS\b/SLAB_NOFS/g
+#/#[ \t]*define[ \t]*\bSLAB_NOFS\b[ \t]*\bSLAB_NOFS\b/d
+#s/\bcfs_shrinker\b/shrinker/g
+#/#[ \t]*define[ \t]*\bshrinker\b[ \t]*\bshrinker\b/d
+#s/\bcfs_shrinker_t\b/shrinker_t/g
+#/typedef[ \t]*\bshrinker_t\b[ \t]*\bshrinker_t\b/d
+#s/\bcfs_set_shrinker\b/set_shrinker/g
+#/#[ \t]*define[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)[ \t]*\bset_shrinker\b *( *\w* *, *\w* *)/d
+#s/\bcfs_remove_shrinker\b/remove_shrinker/g
+#/#[ \t]*define[ \t]*\bremove_shrinker\b *( *\w* *)[ \t]*\bremove_shrinker\b *( *\w* *)/d
+#s/\bCFS_DEFAULT_SEEKS\b/DEFAULT_SEEKS/g
+#/#[ \t]*define[ \t]*\bDEFAULT_SEEKS\b[ \t]*\bDEFAULT_SEEKS\b/d
+
+
+#s/\bcfs_\b//g
+#s/\bCFS_\b//g
+#/typedef[ \t]*\b\b[ \t]*\b\b/d
+#/#[ \t]*define[ \t]*\b\b[ \t]*\b\b/d
+#/#[ \t]*define[ \t]*\b\b *( *)[ \t]*\b\b *( *)/d
+#/#[ \t]*define[ \t]*\b\b *( *\w* *)[ \t]*\b\b *( *\w* *)/d
+#/#[ \t]*define[ \t]*\b\b *( *\w* *, *\w* *)[ \t]*\b\b *( *\w* *, *\w* *)/d
+#/#[ \t]*define[ \t]*\b\b *( *\w* *, *\w* *, *\w* *)[ \t]*\b\b *( *\w* *, *\w* *, *\w* *)/d
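+
+# Typical invocation (an illustrative sketch only -- the build hook and
+# the exact file list are assumptions, not defined by this script):
+#   find libcfs lnet lustre -name '*.[ch]' | xargs sed -i -f build/libcfs_cleanup.sed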
index 5436f3c..5991ccd 100644
@@ -62,32 +62,32 @@ cfs_bitmap_t *CFS_ALLOCATE_BITMAP(int size)
 static inline
 void cfs_bitmap_set(cfs_bitmap_t *bitmap, int nbit)
 {
-        cfs_set_bit(nbit, bitmap->data);
+       set_bit(nbit, bitmap->data);
 }
 
 static inline
 void cfs_bitmap_clear(cfs_bitmap_t *bitmap, int nbit)
 {
-        cfs_test_and_clear_bit(nbit, bitmap->data);
+       test_and_clear_bit(nbit, bitmap->data);
 }
 
 static inline
 int cfs_bitmap_check(cfs_bitmap_t *bitmap, int nbit)
 {
-        return cfs_test_bit(nbit, bitmap->data);
+       return test_bit(nbit, bitmap->data);
 }
 
 static inline
 int cfs_bitmap_test_and_clear(cfs_bitmap_t *bitmap, int nbit)
 {
-        return cfs_test_and_clear_bit(nbit, bitmap->data);
+       return test_and_clear_bit(nbit, bitmap->data);
 }
 
 /* return 0 if bitmap has no set bits */
 static inline
 int cfs_bitmap_check_empty(cfs_bitmap_t *bitmap)
 {
-        return cfs_find_first_bit(bitmap->data, bitmap->size) == bitmap->size;
+       return find_first_bit(bitmap->data, bitmap->size) == bitmap->size;
 }
 
 static inline
@@ -101,9 +101,9 @@ void cfs_bitmap_copy(cfs_bitmap_t *new, cfs_bitmap_t *old)
        new->size = newsize;
 }
 
-#define cfs_foreach_bit(bitmap, pos) \
-       for((pos)=cfs_find_first_bit((bitmap)->data, bitmap->size);   \
-           (pos) < (bitmap)->size;                               \
-           (pos) = cfs_find_next_bit((bitmap)->data, (bitmap)->size, (pos)+1))
+#define cfs_foreach_bit(bitmap, pos)                                   \
+       for ((pos) = find_first_bit((bitmap)->data, bitmap->size);      \
+            (pos) < (bitmap)->size;                                    \
+            (pos) = find_next_bit((bitmap)->data, (bitmap)->size, (pos) + 1))
 
 #endif
index 95203c9..8033c0f 100644
 
 /*
  * spin_lock (use Linux kernel's primitives)
- * 
+ *
  * - spin_lock_init(x)
  * - spin_lock(x)
  * - spin_unlock(x)
  * - spin_trylock(x)
- * 
+ *
  * - spin_lock_irqsave(x, f)
  * - spin_unlock_irqrestore(x, f)
  */
index ca785b8..47f3408 100644
@@ -131,8 +131,8 @@ struct cfs_hash_lock_ops;
 struct cfs_hash_hlist_ops;
 
 typedef union {
-        cfs_rwlock_t                rw;             /**< rwlock */
-        cfs_spinlock_t              spin;           /**< spinlock */
+       rwlock_t                rw;             /**< rwlock */
+       spinlock_t              spin;           /**< spinlock */
 } cfs_hash_lock_t;
 
 /**
@@ -307,7 +307,7 @@ typedef struct cfs_hash {
         cfs_hash_bucket_t         **hs_rehash_buckets;
 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
         /** serialize debug members */
-        cfs_spinlock_t              hs_dep_lock;
+       spinlock_t                      hs_dep_lock;
         /** max depth */
         unsigned int                hs_dep_max;
         /** id of the deepest bucket */
index aaaac7c..5c55887 100644
@@ -470,7 +470,7 @@ struct cfs_percpt_lock {
        /* exclusively locked */
        unsigned int            pcl_locked;
        /* private lock table */
-       cfs_spinlock_t          **pcl_locks;
+       spinlock_t              **pcl_locks;
 };
 
 /* return number of private locks */
index 229ce4c..d625bc6 100644
  */
 #include <linux/bitops.h>
 
-#define cfs_test_bit(nr, addr)              test_bit(nr, addr)
-#define cfs_set_bit(nr, addr)               set_bit(nr, addr)
-#define cfs_clear_bit(nr, addr)             clear_bit(nr, addr)
-#define cfs_test_and_set_bit(nr, addr)      test_and_set_bit(nr, addr)
-#define cfs_test_and_clear_bit(nr, addr)    test_and_clear_bit(nr, addr)
-#define cfs_find_first_bit(addr, size)      find_first_bit(addr, size)
-#define cfs_find_first_zero_bit(addr, size) find_first_zero_bit(addr, size)
-#define cfs_find_next_bit(addr, size, off)  find_next_bit(addr, size, off)
-#define cfs_find_next_zero_bit(addr, size, off) \
-        find_next_zero_bit(addr, size, off)
 
-#define cfs_ffz(x)                          ffz(x)
-#define cfs_ffs(x)                          ffs(x)
-#define cfs_fls(x)                          fls(x)
index cc2ca51..943459f 100644
  * spinlock "implementation"
  */
 
-typedef spinlock_t cfs_spinlock_t;
-
-#define cfs_spin_lock_init(lock)             spin_lock_init(lock)
-#define cfs_spin_lock(lock)                  spin_lock(lock)
-#define cfs_spin_lock_bh(lock)               spin_lock_bh(lock)
-#define cfs_spin_lock_bh_init(lock)          spin_lock_bh_init(lock)
-#define cfs_spin_unlock(lock)                spin_unlock(lock)
-#define cfs_spin_unlock_bh(lock)             spin_unlock_bh(lock)
-#define cfs_spin_trylock(lock)               spin_trylock(lock)
-#define cfs_spin_is_locked(lock)             spin_is_locked(lock)
-
-#define cfs_spin_lock_irq(lock)              spin_lock_irq(lock)
-#define cfs_spin_unlock_irq(lock)            spin_unlock_irq(lock)
-#define cfs_read_lock_irqsave(lock, f)       read_lock_irqsave(lock, f)
-#define cfs_write_lock_irqsave(lock, f)      write_lock_irqsave(lock, f)
-#define cfs_write_unlock_irqrestore(lock, f) write_unlock_irqrestore(lock, f)
-#define cfs_spin_lock_irqsave(lock, f)       spin_lock_irqsave(lock, f)
-#define cfs_spin_unlock_irqrestore(lock, f)  spin_unlock_irqrestore(lock, f)
+
+
 
 /*
  * rw_semaphore "implementation" (use Linux kernel's primitives)
@@ -110,19 +94,10 @@ typedef spinlock_t cfs_spinlock_t;
  * - down_write(x)
  * - up_write(x)
  */
-typedef struct rw_semaphore cfs_rw_semaphore_t;
 
-#define cfs_init_rwsem(s)         init_rwsem(s)
-#define cfs_down_read(s)          down_read(s)
-#define cfs_down_read_trylock(s)  down_read_trylock(s)
-#define cfs_up_read(s)            up_read(s)
-#define cfs_down_write(s)         down_write(s)
-#define cfs_down_write_trylock(s) down_write_trylock(s)
-#define cfs_up_write(s)           up_write(s)
 
-#define cfs_fini_rwsem(s)         do {} while(0)
+#define fini_rwsem(s)          do {} while (0)
 
-#define CFS_DECLARE_RWSEM(name)   DECLARE_RWSEM(name)
 
 /*
  * rwlock_t "implementation" (use Linux kernel's primitives)
@@ -137,17 +112,7 @@ typedef struct rw_semaphore cfs_rw_semaphore_t;
  *
  * - RW_LOCK_UNLOCKED
  */
-typedef rwlock_t cfs_rwlock_t;
-
-#define cfs_rwlock_init(lock)                  rwlock_init(lock)
-#define cfs_read_lock(lock)                    read_lock(lock)
-#define cfs_read_unlock(lock)                  read_unlock(lock)
-#define cfs_read_unlock_irqrestore(lock,flags) \
-        read_unlock_irqrestore(lock, flags)
-#define cfs_write_lock(lock)                   write_lock(lock)
-#define cfs_write_unlock(lock)                 write_unlock(lock)
-#define cfs_write_lock_bh(lock)                write_lock_bh(lock)
-#define cfs_write_unlock_bh(lock)              write_unlock_bh(lock)
+
 
 #ifndef DEFINE_RWLOCK
 #define DEFINE_RWLOCK(lock)    rwlock_t lock = __RW_LOCK_UNLOCKED(lock)
@@ -165,18 +130,7 @@ typedef rwlock_t cfs_rwlock_t;
  * - wait_for_completion_interruptible(c)
  * - fini_completion(c)
  */
-typedef struct completion cfs_completion_t;
-
-#define CFS_DECLARE_COMPLETION(work)             DECLARE_COMPLETION(work)
-#define CFS_INIT_COMPLETION(c)                   INIT_COMPLETION(c)
-#define CFS_COMPLETION_INITIALIZER(work)         COMPLETION_INITIALIZER(work)
-#define cfs_init_completion(c)                   init_completion(c)
-#define cfs_complete(c)                          complete(c)
-#define cfs_wait_for_completion(c)               wait_for_completion(c)
-#define cfs_wait_for_completion_interruptible(c) \
-        wait_for_completion_interruptible(c)
-#define cfs_complete_and_exit(c, code)           complete_and_exit(c, code)
-#define cfs_fini_completion(c)                   do { } while (0)
+#define fini_completion(c) do { } while (0)
 
 /*
  * semaphore "implementation" (use Linux kernel's primitives)
@@ -187,19 +141,6 @@ typedef struct completion cfs_completion_t;
  * - down_interruptible(sem)
  * - down_trylock(sem)
  */
-typedef struct semaphore      cfs_semaphore_t;
-
-#ifdef DEFINE_SEMAPHORE
-#define CFS_DEFINE_SEMAPHORE(name)          DEFINE_SEMAPHORE(name)
-#else
-#define CFS_DEFINE_SEMAPHORE(name)          DECLARE_MUTEX(name)
-#endif
-
-#define cfs_sema_init(sem, val)             sema_init(sem, val)
-#define cfs_up(x)                           up(x)
-#define cfs_down(x)                         down(x)
-#define cfs_down_interruptible(x)           down_interruptible(x)
-#define cfs_down_trylock(x)                 down_trylock(x)
 
 /*
  * mutex "implementation" (use Linux kernel's primitives)
@@ -212,17 +153,6 @@ typedef struct semaphore      cfs_semaphore_t;
  * - mutex_is_locked(x)
  * - mutex_destroy(x)
  */
-typedef struct mutex cfs_mutex_t;
-
-#define CFS_DEFINE_MUTEX(name)             DEFINE_MUTEX(name)
-
-#define cfs_mutex_init(x)                   mutex_init(x)
-#define cfs_mutex_lock(x)                   mutex_lock(x)
-#define cfs_mutex_unlock(x)                 mutex_unlock(x)
-#define cfs_mutex_lock_interruptible(x)     mutex_lock_interruptible(x)
-#define cfs_mutex_trylock(x)                mutex_trylock(x)
-#define cfs_mutex_is_locked(x)              mutex_is_locked(x)
-#define cfs_mutex_destroy(x)                mutex_destroy(x)
 
 #ifndef lockdep_set_class
 
@@ -232,62 +162,43 @@ typedef struct mutex cfs_mutex_t;
  *
  **************************************************************************/
 
-typedef struct cfs_lock_class_key {
-        ;
-} cfs_lock_class_key_t;
+struct lock_class_key {
+       ;
+};
 
-#define cfs_lockdep_set_class(lock, key) \
-        do { (void)sizeof (lock);(void)sizeof (key); } while (0)
-/* This has to be a macro, so that `subclass' can be undefined in kernels that
- * do not support lockdep. */
+#define lockdep_set_class(lock, key) \
+       do { (void)sizeof(lock); (void)sizeof(key); } while (0)
+/* This has to be a macro, so that `subclass' can be undefined in kernels
+ * that do not support lockdep. */
 
 
-static inline void cfs_lockdep_off(void)
+static inline void lockdep_off(void)
 {
 }
 
-static inline void cfs_lockdep_on(void)
+static inline void lockdep_on(void)
 {
 }
 #else
-typedef struct lock_class_key cfs_lock_class_key_t;
 
-#define cfs_lockdep_set_class(lock, key) lockdep_set_class(lock, key)
-#define cfs_lockdep_off()                lockdep_off()
-#define cfs_lockdep_on()                 lockdep_on()
 #endif /* lockdep_set_class */
 
 #ifndef CONFIG_DEBUG_LOCK_ALLOC
 #ifndef mutex_lock_nested
-#define cfs_mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
-#else
-#define cfs_mutex_lock_nested(mutex, subclass) \
-        mutex_lock_nested(mutex, subclass)
+#define mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
 #endif
 
 #ifndef spin_lock_nested
-#define cfs_spin_lock_nested(lock, subclass) spin_lock(lock)
-#else
-#define cfs_spin_lock_nested(lock, subclass) spin_lock_nested(lock, subclass)
+#define spin_lock_nested(lock, subclass) spin_lock(lock)
 #endif
 
 #ifndef down_read_nested
-#define cfs_down_read_nested(lock, subclass) down_read(lock)
-#else
-#define cfs_down_read_nested(lock, subclass) down_read_nested(lock, subclass)
+#define down_read_nested(lock, subclass) down_read(lock)
 #endif
 
 #ifndef down_write_nested
-#define cfs_down_write_nested(lock, subclass) down_write(lock)
-#else
-#define cfs_down_write_nested(lock, subclass) down_write_nested(lock, subclass)
+#define down_write_nested(lock, subclass) down_write(lock)
 #endif
-#else /* CONFIG_DEBUG_LOCK_ALLOC is defined */
-#define cfs_mutex_lock_nested(mutex, subclass) \
-        mutex_lock_nested(mutex, subclass)
-#define cfs_spin_lock_nested(lock, subclass) spin_lock_nested(lock, subclass)
-#define cfs_down_read_nested(lock, subclass) down_read_nested(lock, subclass)
-#define cfs_down_write_nested(lock, subclass) down_write_nested(lock, subclass)
 #endif /* CONFIG_DEBUG_LOCK_ALLOC */
 
 
index b3a493d..bbf260f 100644
 // XXX BUG 1511 -- remove this stanza and all callers when bug 1511 is resolved
 
 #define SIGNAL_MASK_LOCK(task, flags)                                  \
-  spin_lock_irqsave(&task->sighand->siglock, flags)
+       spin_lock_irqsave(&task->sighand->siglock, flags)
 #define SIGNAL_MASK_UNLOCK(task, flags)                                \
-  spin_unlock_irqrestore(&task->sighand->siglock, flags)
+       spin_unlock_irqrestore(&task->sighand->siglock, flags)
 #define USERMODEHELPER(path, argv, envp)                               \
-  call_usermodehelper(path, argv, envp, 1)
+       call_usermodehelper(path, argv, envp, 1)
 #define RECALC_SIGPENDING         recalc_sigpending()
 #define CLEAR_SIGPENDING          clear_tsk_thread_flag(current,       \
                                                         TIF_SIGPENDING)
index 0f80cdd..64785c2 100644
@@ -116,15 +116,15 @@ struct upcall_cache_ops {
 };
 
 struct upcall_cache {
-        cfs_list_t              uc_hashtable[UC_CACHE_HASH_SIZE];
-        cfs_spinlock_t          uc_lock;
-        cfs_rwlock_t            uc_upcall_rwlock;
-
-        char                    uc_name[40];            /* for upcall */
-        char                    uc_upcall[UC_CACHE_UPCALL_MAXPATH];
-        int                     uc_acquire_expire;      /* seconds */
-        int                     uc_entry_expire;        /* seconds */
-        struct upcall_cache_ops *uc_ops;
+       cfs_list_t              uc_hashtable[UC_CACHE_HASH_SIZE];
+       spinlock_t              uc_lock;
+       rwlock_t                uc_upcall_rwlock;
+
+       char                    uc_name[40];            /* for upcall */
+       char                    uc_upcall[UC_CACHE_UPCALL_MAXPATH];
+       int                     uc_acquire_expire;      /* seconds */
+       int                     uc_entry_expire;        /* seconds */
+       struct upcall_cache_ops *uc_ops;
 };
 
 struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
index c371c80..927ede9 100644
@@ -74,7 +74,7 @@ typedef struct poll_table_struct                cfs_poll_table_t;
 #define cfs_seq_open(file, ops, rc)             (rc = seq_open(file, ops))
 
 /* in lprocfs_stat.c, to protect the private data for proc entries */
-extern cfs_rw_semaphore_t       _lprocfs_lock;
+extern struct rw_semaphore             _lprocfs_lock;
 
 /* to begin from 2.6.23, Linux defines self file_operations (proc_reg_file_ops)
  * in procfs, the proc file_operation defined by Lustre (lprocfs_generic_fops)
@@ -86,14 +86,14 @@ extern cfs_rw_semaphore_t       _lprocfs_lock;
  */
 #ifndef HAVE_PROCFS_USERS
 
-#define LPROCFS_ENTRY()                 \
-do {                                    \
-        cfs_down_read(&_lprocfs_lock);  \
+#define LPROCFS_ENTRY()                \
+do {                                   \
+       down_read(&_lprocfs_lock);      \
 } while(0)
 
-#define LPROCFS_EXIT()                  \
-do {                                    \
-        cfs_up_read(&_lprocfs_lock);    \
+#define LPROCFS_EXIT()                 \
+do {                                   \
+       up_read(&_lprocfs_lock);        \
 } while(0)
 
 #else
@@ -121,14 +121,15 @@ int LPROCFS_ENTRY_AND_CHECK(struct proc_dir_entry *dp)
 static inline
 int LPROCFS_ENTRY_AND_CHECK(struct proc_dir_entry *dp)
 {
-        int deleted = 0;
-        spin_lock(&(dp)->pde_unload_lock);
-        if (dp->proc_fops == NULL)
-                deleted = 1;
-        spin_unlock(&(dp)->pde_unload_lock);
-        if (deleted)
-                return -ENODEV;
-        return 0;
+       int deleted = 0;
+
+       spin_lock(&(dp)->pde_unload_lock);
+       if (dp->proc_fops == NULL)
+               deleted = 1;
+       spin_unlock(&(dp)->pde_unload_lock);
+       if (deleted)
+               return -ENODEV;
+       return 0;
 }
 #else /* !HAVE_PROCFS_DELETED*/
 static inline
@@ -148,14 +149,14 @@ do {                                    \
         up_read(&_lprocfs_lock);        \
 } while(0)
 
-#define LPROCFS_WRITE_ENTRY()           \
-do {                                    \
-        cfs_down_write(&_lprocfs_lock); \
+#define LPROCFS_WRITE_ENTRY()          \
+do {                                   \
+       down_write(&_lprocfs_lock);     \
 } while(0)
 
-#define LPROCFS_WRITE_EXIT()            \
-do {                                    \
-        cfs_up_write(&_lprocfs_lock);   \
+#define LPROCFS_WRITE_EXIT()           \
+do {                                   \
+       up_write(&_lprocfs_lock);       \
 } while(0)
 #else /* !LPROCFS */
 
@@ -186,7 +187,7 @@ typedef struct cfs_seq_file {
         size_t                     count;
         loff_t                     index;
         loff_t                     version;
-        cfs_mutex_t                lock;
+       struct mutex                    lock;
         struct cfs_seq_operations *op;
         void                      *private;
 } cfs_seq_file_t;
index 3e667f1..51aba34 100644
@@ -40,7 +40,7 @@
 #define __LIBCFS_USER_BITOPS_H__
 
 /* test if bit nr is set in bitmap addr; returns previous value of bit nr */
-static __inline__ int cfs_test_and_set_bit(int nr, unsigned long *addr)
+static inline int test_and_set_bit(int nr, unsigned long *addr)
 {
         unsigned long mask;
 
@@ -51,10 +51,10 @@ static __inline__ int cfs_test_and_set_bit(int nr, unsigned long *addr)
         return nr;
 }
 
-#define cfs_set_bit(n, a) cfs_test_and_set_bit(n, a)
+#define set_bit(n, a) test_and_set_bit(n, a)
 
 /* clear bit nr in bitmap addr; returns previous value of bit nr*/
-static __inline__ int cfs_test_and_clear_bit(int nr, unsigned long *addr)
+static inline int test_and_clear_bit(int nr, unsigned long *addr)
 {
         unsigned long mask;
 
@@ -65,9 +65,9 @@ static __inline__ int cfs_test_and_clear_bit(int nr, unsigned long *addr)
         return nr;
 }
 
-#define cfs_clear_bit(n, a) cfs_test_and_clear_bit(n, a)
+#define clear_bit(n, a) test_and_clear_bit(n, a)
 
-static __inline__ int cfs_test_bit(int nr, const unsigned long *addr)
+static inline int test_bit(int nr, const unsigned long *addr)
 {
         return ((1UL << (nr & (BITS_PER_LONG - 1))) &
                 ((addr)[nr / BITS_PER_LONG])) != 0;
@@ -148,14 +148,13 @@ static __inline__ unsigned long __cfs_ffs(long data)
 #define __cfs_ffz(x)   __cfs_ffs(~(x))
 #define __cfs_flz(x)   __cfs_fls(~(x))
 
-unsigned long cfs_find_next_bit(unsigned long *addr,
-                                unsigned long size, unsigned long offset);
+unsigned long find_next_bit(unsigned long *addr,
+                           unsigned long size, unsigned long offset);
 
-unsigned long cfs_find_next_zero_bit(unsigned long *addr,
-                                     unsigned long size, unsigned long offset);
+unsigned long find_next_zero_bit(unsigned long *addr,
+                                unsigned long size, unsigned long offset);
 
-#define cfs_find_first_bit(addr,size)     (cfs_find_next_bit((addr),(size),0))
-#define cfs_find_first_zero_bit(addr,size)  \
-        (cfs_find_next_zero_bit((addr),(size),0))
+#define find_first_bit(addr, size)       find_next_bit((addr), (size),0)
+#define find_first_zero_bit(addr, size)  find_next_zero_bit((addr), (size),0)
 
 #endif
index cd4983b..0605308 100644
  */
 
 /*
- * cfs_spin_lock
+ * spin_lock
  *
- * - cfs_spin_lock_init(x)
- * - cfs_spin_lock(x)
- * - cfs_spin_unlock(x)
- * - cfs_spin_trylock(x)
- * - cfs_spin_lock_bh_init(x)
- * - cfs_spin_lock_bh(x)
- * - cfs_spin_unlock_bh(x)
+ * - spin_lock_init(x)
+ * - spin_lock(x)
+ * - spin_unlock(x)
+ * - spin_trylock(x)
+ * - spin_lock_bh_init(x)
+ * - spin_lock_bh(x)
+ * - spin_unlock_bh(x)
  *
- * - cfs_spin_is_locked(x)
- * - cfs_spin_lock_irqsave(x, f)
- * - cfs_spin_unlock_irqrestore(x, f)
+ * - spin_is_locked(x)
+ * - spin_lock_irqsave(x, f)
+ * - spin_unlock_irqrestore(x, f)
  *
  * No-op implementation.
  */
-struct cfs_spin_lock {int foo;};
+struct spin_lock { int foo; };
 
-typedef struct cfs_spin_lock cfs_spinlock_t;
+typedef struct spin_lock spinlock_t;
 
-#define DEFINE_SPINLOCK(lock)          cfs_spinlock_t lock = { }
-#define LASSERT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
-#define LINVRNT_SPIN_LOCKED(lock) do {(void)sizeof(lock);} while(0)
-#define LASSERT_SEM_LOCKED(sem) do {(void)sizeof(sem);} while(0)
-#define LASSERT_MUTEX_LOCKED(x) do {(void)sizeof(x);} while(0)
+#define DEFINE_SPINLOCK(lock)          spinlock_t lock = { }
+#define LASSERT_SPIN_LOCKED(lock)      do { (void)sizeof(lock); } while (0)
+#define LINVRNT_SPIN_LOCKED(lock)      do { (void)sizeof(lock); } while (0)
+#define LASSERT_SEM_LOCKED(sem)                do { (void)sizeof(sem); } while (0)
+#define LASSERT_MUTEX_LOCKED(x)                do { (void)sizeof(x); } while (0)
 
-void cfs_spin_lock_init(cfs_spinlock_t *lock);
-void cfs_spin_lock(cfs_spinlock_t *lock);
-void cfs_spin_unlock(cfs_spinlock_t *lock);
-int cfs_spin_trylock(cfs_spinlock_t *lock);
-void cfs_spin_lock_bh_init(cfs_spinlock_t *lock);
-void cfs_spin_lock_bh(cfs_spinlock_t *lock);
-void cfs_spin_unlock_bh(cfs_spinlock_t *lock);
+void spin_lock_init(spinlock_t *lock);
+void spin_lock(spinlock_t *lock);
+void spin_unlock(spinlock_t *lock);
+int  spin_trylock(spinlock_t *lock);
+void spin_lock_bh_init(spinlock_t *lock);
+void spin_lock_bh(spinlock_t *lock);
+void spin_unlock_bh(spinlock_t *lock);
 
-static inline int cfs_spin_is_locked(cfs_spinlock_t *l) {return 1;}
-static inline void cfs_spin_lock_irqsave(cfs_spinlock_t *l, unsigned long f){}
-static inline void cfs_spin_unlock_irqrestore(cfs_spinlock_t *l,
-                                              unsigned long f){}
+static inline int spin_is_locked(spinlock_t *l) { return 1; }
+static inline void spin_lock_irqsave(spinlock_t *l, unsigned long f) {}
+static inline void spin_unlock_irqrestore(spinlock_t *l, unsigned long f) {}
 
 /*
  * Semaphore
  *
- * - cfs_sema_init(x, v)
+ * - sema_init(x, v)
  * - __down(x)
  * - __up(x)
  */
-typedef struct cfs_semaphore {
-    int foo;
-} cfs_semaphore_t;
+struct semaphore {
+       int foo;
+};
 
-void cfs_sema_init(cfs_semaphore_t *s, int val);
-void __up(cfs_semaphore_t *s);
-void __down(cfs_semaphore_t *s);
-int __down_interruptible(cfs_semaphore_t *s);
+void sema_init(struct semaphore *s, int val);
+void __up(struct semaphore *s);
+void __down(struct semaphore *s);
+int __down_interruptible(struct semaphore *s);
 
-#define CFS_DEFINE_SEMAPHORE(name)      cfs_semaphore_t name = { 1 }
+#define DEFINE_SEMAPHORE(name)      struct semaphore name = { 1 }
 
-#define cfs_up(s)                       __up(s)
-#define cfs_down(s)                     __down(s)
-#define cfs_down_interruptible(s)       __down_interruptible(s)
+#define up(s)                          __up(s)
+#define down(s)                        __down(s)
+#define down_interruptible(s)          __down_interruptible(s)
 
-static inline int cfs_down_trylock(cfs_semaphore_t *sem)
+static inline int down_trylock(struct semaphore *sem)
 {
         return 0;
 }
@@ -135,94 +134,102 @@ static inline int cfs_down_trylock(cfs_semaphore_t *sem)
 /*
  * Completion:
  *
- * - cfs_init_completion_module(c)
- * - cfs_call_wait_handler(t)
- * - cfs_init_completion(c)
- * - cfs_complete(c)
- * - cfs_wait_for_completion(c)
- * - cfs_wait_for_completion_interruptible(c)
+ * - init_completion_module(c)
+ * - call_wait_handler(t)
+ * - init_completion(c)
+ * - complete(c)
+ * - wait_for_completion(c)
+ * - wait_for_completion_interruptible(c)
  */
-typedef struct {
-        unsigned int done;
-        cfs_waitq_t wait;
-} cfs_completion_t;
+struct completion {
+       unsigned int done;
+       cfs_waitq_t wait;
+};
 
-typedef int (*cfs_wait_handler_t) (int timeout);
-void cfs_init_completion_module(cfs_wait_handler_t handler);
-int  cfs_call_wait_handler(int timeout);
-void cfs_init_completion(cfs_completion_t *c);
-void cfs_complete(cfs_completion_t *c);
-void cfs_wait_for_completion(cfs_completion_t *c);
-int cfs_wait_for_completion_interruptible(cfs_completion_t *c);
+typedef int (*wait_handler_t) (int timeout);
+void init_completion_module(wait_handler_t handler);
+int  call_wait_handler(int timeout);
+void init_completion(struct completion *c);
+void complete(struct completion *c);
+void wait_for_completion(struct completion *c);
+int wait_for_completion_interruptible(struct completion *c);
 
-#define CFS_COMPLETION_INITIALIZER(work) \
-        { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+#define COMPLETION_INITIALIZER(work) \
+       { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
 
-#define CFS_DECLARE_COMPLETION(work) \
-        cfs_completion_t work = CFS_COMPLETION_INITIALIZER(work)
 
-#define CFS_INIT_COMPLETION(x)      ((x).done = 0)
+#define INIT_COMPLETION(x)     ((x).done = 0)
 
 
 /*
- * cfs_rw_semaphore:
+ * rw_semaphore:
  *
- * - cfs_init_rwsem(x)
- * - cfs_down_read(x)
- * - cfs_down_read_trylock(x)
- * - cfs_down_write(struct cfs_rw_semaphore *s);
- * - cfs_down_write_trylock(struct cfs_rw_semaphore *s);
- * - cfs_up_read(x)
- * - cfs_up_write(x)
- * - cfs_fini_rwsem(x)
+ * - init_rwsem(x)
+ * - down_read(x)
+ * - down_read_trylock(x)
+ * - down_write(struct rw_semaphore *s);
+ * - down_write_trylock(struct rw_semaphore *s);
+ * - up_read(x)
+ * - up_write(x)
+ * - fini_rwsem(x)
  */
-typedef struct cfs_rw_semaphore {
-        int foo;
-} cfs_rw_semaphore_t;
-
-void cfs_init_rwsem(cfs_rw_semaphore_t *s);
-void cfs_down_read(cfs_rw_semaphore_t *s);
-int cfs_down_read_trylock(cfs_rw_semaphore_t *s);
-void cfs_down_write(cfs_rw_semaphore_t *s);
-int cfs_down_write_trylock(cfs_rw_semaphore_t *s);
-void cfs_up_read(cfs_rw_semaphore_t *s);
-void cfs_up_write(cfs_rw_semaphore_t *s);
-void cfs_fini_rwsem(cfs_rw_semaphore_t *s);
-#define CFS_DECLARE_RWSEM(name)  cfs_rw_semaphore_t name = { }
+struct rw_semaphore {
+       int foo;
+};
+
+void init_rwsem(struct rw_semaphore *s);
+void down_read(struct rw_semaphore *s);
+int down_read_trylock(struct rw_semaphore *s);
+void down_write(struct rw_semaphore *s);
+int down_write_trylock(struct rw_semaphore *s);
+void up_read(struct rw_semaphore *s);
+void up_write(struct rw_semaphore *s);
+void fini_rwsem(struct rw_semaphore *s);
+#define DECLARE_RWSEM(name)  struct rw_semaphore name = { }
 
 /*
  * read-write lock : Need to be investigated more!!
  * XXX nikita: for now, let rwlock_t to be identical to rw_semaphore
  *
- * - cfs_rwlock_init(x)
- * - cfs_read_lock(x)
- * - cfs_read_unlock(x)
- * - cfs_write_lock(x)
- * - cfs_write_unlock(x)
- * - cfs_write_lock_irqsave(x)
- * - cfs_write_unlock_irqrestore(x)
- * - cfs_read_lock_irqsave(x)
- * - cfs_read_unlock_irqrestore(x)
+ * - rwlock_init(x)
+ * - read_lock(x)
+ * - read_unlock(x)
+ * - write_lock(x)
+ * - write_unlock(x)
+ * - write_lock_irqsave(x)
+ * - write_unlock_irqrestore(x)
+ * - read_lock_irqsave(x)
+ * - read_unlock_irqrestore(x)
  */
-typedef cfs_rw_semaphore_t cfs_rwlock_t;
-#define DEFINE_RWLOCK(lock)    cfs_rwlock_t lock = { }
+#define rwlock_t               struct rw_semaphore
+#define DEFINE_RWLOCK(lock)    rwlock_t lock = { }
+
+#define rwlock_init(pl)                init_rwsem(pl)
 
-#define cfs_rwlock_init(pl)         cfs_init_rwsem(pl)
+#define read_lock(l)           down_read(l)
+#define read_unlock(l)         up_read(l)
+#define write_lock(l)          down_write(l)
+#define write_unlock(l)                up_write(l)
+
+static inline void write_lock_irqsave(rwlock_t *l, unsigned long f)
+{
+       write_lock(l);
+}
 
-#define cfs_read_lock(l)            cfs_down_read(l)
-#define cfs_read_unlock(l)          cfs_up_read(l)
-#define cfs_write_lock(l)           cfs_down_write(l)
-#define cfs_write_unlock(l)         cfs_up_write(l)
+static inline void write_unlock_irqrestore(rwlock_t *l, unsigned long f)
+{
+       write_unlock(l);
+}
 
-static inline void
-cfs_write_lock_irqsave(cfs_rwlock_t *l, unsigned long f) { cfs_write_lock(l); }
-static inline void
-cfs_write_unlock_irqrestore(cfs_rwlock_t *l, unsigned long f) { cfs_write_unlock(l); }
+static inline void read_lock_irqsave(rwlock_t *l, unsigned long f)
+{
+       read_lock(l);
+}
 
-static inline void
-cfs_read_lock_irqsave(cfs_rwlock_t *l, unsigned long f) { cfs_read_lock(l); }
-static inline void
-cfs_read_unlock_irqrestore(cfs_rwlock_t *l, unsigned long f) { cfs_read_unlock(l); }
+static inline void read_unlock_irqrestore(rwlock_t *l, unsigned long f)
+{
+       read_unlock(l);
+}
 
 /*
  * Atomic for single-threaded user-space
@@ -260,26 +267,26 @@ typedef struct {
         int c_done;
         pthread_cond_t c_cond;
         pthread_mutex_t c_mut;
-} cfs_mt_completion_t;
+} mt_completion_t;
 
-void cfs_mt_init_completion(cfs_mt_completion_t *c);
-void cfs_mt_fini_completion(cfs_mt_completion_t *c);
-void cfs_mt_complete(cfs_mt_completion_t *c);
-void cfs_mt_wait_for_completion(cfs_mt_completion_t *c);
+void mt_init_completion(mt_completion_t *c);
+void mt_fini_completion(mt_completion_t *c);
+void mt_complete(mt_completion_t *c);
+void mt_wait_for_completion(mt_completion_t *c);
 
 /*
  * Multi-threaded user space atomic APIs
  */
 
-typedef struct { volatile int counter; } cfs_mt_atomic_t;
+typedef struct { volatile int counter; } mt_atomic_t;
 
-int cfs_mt_atomic_read(cfs_mt_atomic_t *a);
-void cfs_mt_atomic_set(cfs_mt_atomic_t *a, int b);
-int cfs_mt_atomic_dec_and_test(cfs_mt_atomic_t *a);
-void cfs_mt_atomic_inc(cfs_mt_atomic_t *a);
-void cfs_mt_atomic_dec(cfs_mt_atomic_t *a);
-void cfs_mt_atomic_add(int b, cfs_mt_atomic_t *a);
-void cfs_mt_atomic_sub(int b, cfs_mt_atomic_t *a);
+int mt_atomic_read(mt_atomic_t *a);
+void mt_atomic_set(mt_atomic_t *a, int b);
+int mt_atomic_dec_and_test(mt_atomic_t *a);
+void mt_atomic_inc(mt_atomic_t *a);
+void mt_atomic_dec(mt_atomic_t *a);
+void mt_atomic_add(int b, mt_atomic_t *a);
+void mt_atomic_sub(int b, mt_atomic_t *a);
 
 #endif /* HAVE_LIBPTHREAD */
 
@@ -288,28 +295,28 @@ void cfs_mt_atomic_sub(int b, cfs_mt_atomic_t *a);
  * Mutex interface.
  *
  **************************************************************************/
-typedef struct cfs_semaphore cfs_mutex_t;
+#define mutex semaphore
 
-#define CFS_DEFINE_MUTEX(m) CFS_DEFINE_SEMAPHORE(m)
+#define DEFINE_MUTEX(m) DEFINE_SEMAPHORE(m)
 
-static inline void cfs_mutex_init(cfs_mutex_t *mutex)
+static inline void mutex_init(struct mutex *mutex)
 {
-        cfs_sema_init(mutex, 1);
+       sema_init(mutex, 1);
 }
 
-static inline void cfs_mutex_lock(cfs_mutex_t *mutex)
+static inline void mutex_lock(struct mutex *mutex)
 {
-        cfs_down(mutex);
+       down(mutex);
 }
 
-static inline void cfs_mutex_unlock(cfs_mutex_t *mutex)
+static inline void mutex_unlock(struct mutex *mutex)
 {
-        cfs_up(mutex);
+       up(mutex);
 }
 
-static inline int cfs_mutex_lock_interruptible(cfs_mutex_t *mutex)
+static inline int mutex_lock_interruptible(struct mutex *mutex)
 {
-        return cfs_down_interruptible(mutex);
+       return down_interruptible(mutex);
 }
 
 /**
@@ -321,12 +328,12 @@ static inline int cfs_mutex_lock_interruptible(cfs_mutex_t *mutex)
  * \retval 1 try-lock succeeded (lock acquired).
  * \retval 0 indicates lock contention.
  */
-static inline int cfs_mutex_trylock(cfs_mutex_t *mutex)
+static inline int mutex_trylock(struct mutex *mutex)
 {
-        return !cfs_down_trylock(mutex);
+       return !down_trylock(mutex);
 }
 
-static inline void cfs_mutex_destroy(cfs_mutex_t *lock)
+static inline void mutex_destroy(struct mutex *lock)
 {
 }
 
@@ -338,7 +345,7 @@ static inline void cfs_mutex_destroy(cfs_mutex_t *lock)
  *
  * \retval 0 mutex is not locked. This should never happen.
  */
-static inline int cfs_mutex_is_locked(cfs_mutex_t *lock)
+static inline int mutex_is_locked(struct mutex *lock)
 {
         return 1;
 }
@@ -350,27 +357,26 @@ static inline int cfs_mutex_is_locked(cfs_mutex_t *lock)
  *
  **************************************************************************/
 
-typedef struct cfs_lock_class_key {
+struct lock_class_key {
         int foo;
-} cfs_lock_class_key_t;
+};
 
-static inline void cfs_lockdep_set_class(void *lock,
-                                         cfs_lock_class_key_t *key)
+static inline void lockdep_set_class(void *lock, struct lock_class_key *key)
 {
 }
 
-static inline void cfs_lockdep_off(void)
+static inline void lockdep_off(void)
 {
 }
 
-static inline void cfs_lockdep_on(void)
+static inline void lockdep_on(void)
 {
 }
 
-#define cfs_mutex_lock_nested(mutex, subclass) cfs_mutex_lock(mutex)
-#define cfs_spin_lock_nested(lock, subclass) cfs_spin_lock(lock)
-#define cfs_down_read_nested(lock, subclass) cfs_down_read(lock)
-#define cfs_down_write_nested(lock, subclass) cfs_down_write(lock)
+#define mutex_lock_nested(mutex, subclass) mutex_lock(mutex)
+#define spin_lock_nested(lock, subclass) spin_lock(lock)
+#define down_read_nested(lock, subclass) down_read(lock)
+#define down_write_nested(lock, subclass) down_write(lock)
 
 
 /* !__KERNEL__ */
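
These single-threaded userspace stubs let code shared between the kernel and
userspace take and release locks unconditionally. A minimal caller sketch
(hypothetical names, not part of this patch):

	static struct rw_semaphore conf_sem;

	void conf_init(void)
	{
		init_rwsem(&conf_sem);		/* no-op in the userspace stub */
	}

	int conf_read(char *buf, int len)
	{
		int rc;

		down_read(&conf_sem);		/* kernel: may sleep; stub: returns */
		rc = conf_copy(buf, len);	/* conf_copy(): hypothetical helper */
		up_read(&conf_sem);
		return rc;
	}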
index ac8650e..ec660e0 100644 (file)
@@ -75,24 +75,24 @@ char * ul2dstr(ulong_ptr_t address, char *buf, int len);
 
 unsigned long simple_strtoul(const char *cp,char **endp, unsigned int base);
 
-static inline int cfs_set_bit(int nr, void * addr)
+static inline int set_bit(int nr, void * addr)
 {
     (((volatile ULONG *) addr)[nr >> 5]) |= (1UL << (nr & 31));
     return *((int *) addr);
 }
 
-static inline int cfs_test_bit(int nr, void * addr)
+static inline int test_bit(int nr, void * addr)
 {
     return (int)(((1UL << (nr & 31)) & (((volatile ULONG *) addr)[nr >> 5])) != 0);
 }
 
-static inline int cfs_clear_bit(int nr, void * addr)
+static inline int clear_bit(int nr, void * addr)
 {
     (((volatile ULONG *) addr)[nr >> 5]) &= (~(1UL << (nr & 31)));
     return *((int *) addr);
 }
 
-static inline int cfs_test_and_set_bit(int nr, volatile void *addr)
+static inline int test_and_set_bit(int nr, volatile void *addr)
 {
     int rc;
     unsigned char  mask;
@@ -106,11 +106,11 @@ static inline int cfs_test_and_set_bit(int nr, volatile void *addr)
     return rc;
 }
 
-#define ext2_set_bit(nr,addr)   (cfs_set_bit(nr, addr), 0)
-#define ext2_clear_bit(nr,addr)        (cfs_clear_bit(nr, addr), 0)
-#define ext2_test_bit(nr,addr)  cfs_test_bit(nr, addr)
+#define ext2_set_bit(nr, addr)         (set_bit(nr, addr), 0)
+#define ext2_clear_bit(nr, addr)       (clear_bit(nr, addr), 0)
+#define ext2_test_bit(nr, addr)                test_bit(nr, addr)
 
-static inline int cfs_ffs(int x)
+static inline int ffs(int x)
 {
         int r = 1;
 
@@ -178,7 +178,7 @@ static inline unsigned long __cfs_ffs(unsigned long word)
  * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
 static inline
-int cfs_fls(int x)
+int fls(int x)
 {
         int r = 32;
 
@@ -207,7 +207,7 @@ int cfs_fls(int x)
         return r;
 }
 
-static inline unsigned cfs_find_first_bit(const unsigned long *addr,
+static inline unsigned find_first_bit(const unsigned long *addr,
                                           unsigned size)
 {
         unsigned x = 0;
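
The fallback bit helpers keep the Linux semantics noted above (fls(0) = 0,
fls(1) = 1, fls(0x80000000) = 32); ffs() likewise returns the 1-based index of
the least significant set bit. A hypothetical standalone check:

	#include <assert.h>

	int main(void)
	{
		assert(ffs(0) == 0 && ffs(1) == 1 && ffs(8) == 4);
		assert(fls(0) == 0 && fls(1) == 1 && fls(8) == 4);
		assert(fls(0x80000000) == 32);
		return 0;
	}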
index af09129..c78101b 100644 (file)
@@ -238,7 +238,7 @@ struct inode {
         int             i_uid;
         int             i_gid;
         __u32           i_flags;
-        cfs_mutex_t     i_sem;
+       struct mutex    i_sem;
         void *          i_priv;
 };
 
index 169cc02..7a5e9fe 100644 (file)
@@ -58,7 +58,7 @@
  *  spinlock & event definitions
  */
 
-typedef struct cfs_spin_lock cfs_spinlock_t;
+typedef struct spin_lock spinlock_t;
 
 /* atomic */
 
@@ -86,7 +86,7 @@ int FASTCALL cfs_atomic_sub_return(int i, cfs_atomic_t *v);
 #define cfs_atomic_inc_return(v)  cfs_atomic_add_return(1, v)
 #define cfs_atomic_dec_return(v)  cfs_atomic_sub_return(1, v)
 
-int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, cfs_spinlock_t *lock);
+int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, spinlock_t *lock);
 
 /* event */
 
@@ -213,43 +213,43 @@ cfs_clear_event(event_t * event)
  *
  */
 
-struct cfs_spin_lock {
-    KSPIN_LOCK lock;
-    KIRQL      irql;
+struct spin_lock {
+       KSPIN_LOCK      lock;
+       KIRQL           irql;
 };
 
-#define CFS_DECL_SPIN(name)  cfs_spinlock_t name;
-#define CFS_DECL_SPIN_EXTERN(name)  extern cfs_spinlock_t name;
+#define CFS_DECL_SPIN(name)            spinlock_t name;
+#define CFS_DECL_SPIN_EXTERN(name)     extern spinlock_t name;
 
 #define DEFINE_SPINLOCK {0}
 
-static inline void cfs_spin_lock_init(cfs_spinlock_t *lock)
+static inline void spin_lock_init(spinlock_t *lock)
 {
-    KeInitializeSpinLock(&(lock->lock));
+       KeInitializeSpinLock(&(lock->lock));
 }
 
-static inline void cfs_spin_lock(cfs_spinlock_t *lock)
+static inline void spin_lock(spinlock_t *lock)
 {
-    KeAcquireSpinLock(&(lock->lock), &(lock->irql));
+       KeAcquireSpinLock(&(lock->lock), &(lock->irql));
 }
 
-static inline void cfs_spin_lock_nested(cfs_spinlock_t *lock, unsigned subclass)
+static inline void spin_lock_nested(spinlock_t *lock, unsigned subclass)
 {
-    KeAcquireSpinLock(&(lock->lock), &(lock->irql));
+       KeAcquireSpinLock(&(lock->lock), &(lock->irql));
 }
 
-static inline void cfs_spin_unlock(cfs_spinlock_t *lock)
+static inline void spin_unlock(spinlock_t *lock)
 {
-    KIRQL       irql = lock->irql;
-    KeReleaseSpinLock(&(lock->lock), irql);
+       KIRQL   irql = lock->irql;
+       KeReleaseSpinLock(&(lock->lock), irql);
 }
 
 
-#define cfs_spin_lock_irqsave(lock, flags)  \
-do {(flags) = 0; cfs_spin_lock(lock);} while(0)
+#define spin_lock_irqsave(lock, flags)  \
+       do { (flags) = 0; spin_lock(lock); } while (0)
 
-#define cfs_spin_unlock_irqrestore(lock, flags) \
-do {cfs_spin_unlock(lock);} while(0)
+#define spin_unlock_irqrestore(lock, flags) \
+       do { spin_unlock(lock); } while (0)
 
 
 /* There's no  corresponding routine in windows kernel.
@@ -259,78 +259,78 @@ do {cfs_spin_unlock(lock);} while(0)
 
 extern int libcfs_mp_system;
 
-static int cfs_spin_trylock(cfs_spinlock_t *lock)
+static int spin_trylock(spinlock_t *lock)
 {
-    KIRQL   Irql;
-    int     rc = 0;
+       KIRQL   Irql;
+       int     rc = 0;
 
-    ASSERT(lock != NULL);
+       ASSERT(lock != NULL);
 
-    KeRaiseIrql(DISPATCH_LEVEL, &Irql);
+       KeRaiseIrql(DISPATCH_LEVEL, &Irql);
 
-    if (libcfs_mp_system) {
-        if (0 == (ulong_ptr_t)lock->lock) {
+       if (libcfs_mp_system) {
+               if (0 == (ulong_ptr_t)lock->lock) {
 #if _X86_
-            __asm {
-                mov  edx, dword ptr [ebp + 8]
-                lock bts dword ptr[edx], 0
-                jb   lock_failed
-                mov  rc, TRUE
-            lock_failed:
-            }
+                       __asm {
+                               mov  edx, dword ptr [ebp + 8]
+                               lock bts dword ptr[edx], 0
+                               jb   lock_failed
+                               mov  rc, TRUE
+                               lock_failed:
+                       }
 #else
-        KdBreakPoint();
+                       KdBreakPoint();
 #endif
 
-        }
-    } else {
-        rc = TRUE;
-    }
+               }
+       } else {
+               rc = TRUE;
+       }
 
-    if (rc) {
-        lock->irql = Irql;
-    } else {
-        KeLowerIrql(Irql);
-    }
+       if (rc) {
+               lock->irql = Irql;
+       } else {
+               KeLowerIrql(Irql);
+       }
 
-    return rc;
+       return rc;
 }
 
-static int cfs_spin_is_locked(cfs_spinlock_t *lock)
+static int spin_is_locked(spinlock_t *lock)
 {
 #if _WIN32_WINNT >= 0x502
-    /* KeTestSpinLock only avalilable on 2k3 server or later */
-    return (!KeTestSpinLock(&lock->lock));
+       /* KeTestSpinLock only available on 2k3 server or later */
+       return !KeTestSpinLock(&lock->lock);
 #else
-    return (int) (lock->lock);
+       return (int) (lock->lock);
 #endif
 }
 
 /* synchronization between cpus: it will disable all DPCs and
    the kernel task scheduler on the CPU */
-#define cfs_spin_lock_bh(x)                cfs_spin_lock(x)
-#define cfs_spin_unlock_bh(x)      cfs_spin_unlock(x)
-#define cfs_spin_lock_bh_init(x)       cfs_spin_lock_init(x)
+#define spin_lock_bh(x)                spin_lock(x)
+#define spin_unlock_bh(x)      spin_unlock(x)
+#define spin_lock_bh_init(x)   spin_lock_init(x)
 
 /*
- * cfs_rw_semaphore (using ERESOURCE)
+ * rw_semaphore (using ERESOURCE)
  */
 
 
-typedef struct cfs_rw_semaphore {
-    ERESOURCE   rwsem;
-} cfs_rw_semaphore_t;
+struct rw_semaphore {
+       ERESOURCE       rwsem;
+};
 
 
-#define CFS_DECLARE_RWSEM(name) cfs_rw_semaphore_t name
-#define CFS_DECLARE_RWSEM_EXTERN(name) extern cfs_rw_semaphore_t name
+#define DECLARE_RWSEM(name) struct rw_semaphore name
+#define CFS_DECLARE_RWSEM_EXTERN(name) extern struct rw_semaphore name
 
 /*
- * cfs_init_rwsem
- *   To initialize the the cfs_rw_semaphore_t structure
+ * init_rwsem
+ *   To initialize the rw_semaphore structure
  *
  * Arguments:
- *   rwsem:  pointer to the cfs_rw_semaphore_t structure
+ *   rwsem:  pointer to the rw_semaphore structure
  *
  * Return Value:
  *   N/A
@@ -339,18 +339,18 @@ typedef struct cfs_rw_semaphore {
  *   N/A
  */
 
-static inline void cfs_init_rwsem(cfs_rw_semaphore_t *s)
+static inline void init_rwsem(struct rw_semaphore *s)
 {
        ExInitializeResourceLite(&s->rwsem);
 }
-#define rwsem_init cfs_init_rwsem
+#define rwsem_init init_rwsem
 
 /*
- * cfs_fini_rwsem
- *   To finilize/destroy the the cfs_rw_semaphore_t structure
+ * fini_rwsem
+ *   To finalize/destroy the rw_semaphore structure
  *
  * Arguments:
- *   rwsem:  pointer to the cfs_rw_semaphore_t structure
+ *   rwsem:  pointer to the rw_semaphore structure
  *
  * Return Value:
  *   N/A
@@ -360,17 +360,17 @@ static inline void cfs_init_rwsem(cfs_rw_semaphore_t *s)
  *   Just define it NULL for other systems.
  */
 
-static inline void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
+static inline void fini_rwsem(struct rw_semaphore *s)
 {
-    ExDeleteResourceLite(&s->rwsem);
+       ExDeleteResourceLite(&s->rwsem);
 }
 
 /*
- * cfs_down_read
- *   To acquire read-lock of the cfs_rw_semaphore
+ * down_read
+ *   To acquire read-lock of the rw_semaphore
  *
  * Arguments:
- *   rwsem:  pointer to the cfs_rw_semaphore_t structure
+ *   rwsem:  pointer to the struct rw_semaphore
  *
  * Return Value:
  *   N/A
@@ -379,19 +379,19 @@ static inline void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
  *   N/A
  */
 
-static inline void cfs_down_read(cfs_rw_semaphore_t *s)
+static inline void down_read(struct rw_semaphore *s)
 {
        ExAcquireResourceSharedLite(&s->rwsem, TRUE);
 }
-#define cfs_down_read_nested cfs_down_read
+#define down_read_nested down_read
 
 
 /*
- * cfs_down_read_trylock
- *   To acquire read-lock of the cfs_rw_semaphore without blocking
+ * down_read_trylock
+ *   To acquire read-lock of the rw_semaphore without blocking
  *
  * Arguments:
- *   rwsem:  pointer to the cfs_rw_semaphore_t structure
+ *   rwsem:  pointer to the struct rw_semaphore
  *
  * Return Value:
  *   Zero: failed to acquire the read lock
@@ -401,18 +401,18 @@ static inline void cfs_down_read(cfs_rw_semaphore_t *s)
  *   This routine will return immediately without waiting.
  */
 
-static inline int cfs_down_read_trylock(cfs_rw_semaphore_t *s)
+static inline int down_read_trylock(struct rw_semaphore *s)
 {
        return ExAcquireResourceSharedLite(&s->rwsem, FALSE);
 }
 
 
 /*
- * cfs_down_write
- *   To acquire write-lock of the cfs_rw_semaphore
+ * down_write
+ *   To acquire write-lock of the struct rw_semaphore
  *
  * Arguments:
- *   rwsem:  pointer to the cfs_rw_semaphore_t structure
+ *   rwsem:  pointer to the struct rw_semaphore
  *
  * Return Value:
  *   N/A
@@ -421,18 +421,18 @@ static inline int cfs_down_read_trylock(cfs_rw_semaphore_t *s)
  *   N/A
  */
 
-static inline void cfs_down_write(cfs_rw_semaphore_t *s)
+static inline void down_write(struct rw_semaphore *s)
 {
        ExAcquireResourceExclusiveLite(&(s->rwsem), TRUE);
 }
-#define cfs_down_write_nested cfs_down_write
+#define down_write_nested down_write
 
 /*
  * down_write_trylock
- *   To acquire write-lock of the cfs_rw_semaphore without blocking
+ *   To acquire write-lock of the rw_semaphore without blocking
  *
  * Arguments:
- *   rwsem:  pointer to the cfs_rw_semaphore_t structure
+ *   rwsem:  pointer to the struct rw_semaphore
  *
  * Return Value:
  *   Zero: failed to acquire the write lock
@@ -442,18 +442,18 @@ static inline void cfs_down_write(cfs_rw_semaphore_t *s)
  *   This routine will return immediately without waiting.
  */
 
-static inline int cfs_down_write_trylock(cfs_rw_semaphore_t *s)
+static inline int down_write_trylock(struct rw_semaphore *s)
 {
-    return ExAcquireResourceExclusiveLite(&(s->rwsem), FALSE);
+       return ExAcquireResourceExclusiveLite(&(s->rwsem), FALSE);
 }
 
 
 /*
- * cfs_up_read
- *   To release read-lock of the cfs_rw_semaphore
+ * up_read
+ *   To release read-lock of the rw_semaphore
  *
  * Arguments:
- *   rwsem:  pointer to the cfs_rw_semaphore_t structure
+ *   rwsem:  pointer to the struct rw_semaphore
  *
  * Return Value:
  *   N/A
@@ -462,20 +462,19 @@ static inline int cfs_down_write_trylock(cfs_rw_semaphore_t *s)
  *   N/A
  */
 
-static inline void cfs_up_read(cfs_rw_semaphore_t *s)
+static inline void up_read(struct rw_semaphore *s)
 {
-    ExReleaseResourceForThreadLite(
-            &(s->rwsem),
-            ExGetCurrentResourceThread());
+       ExReleaseResourceForThreadLite(&(s->rwsem),
+                                      ExGetCurrentResourceThread());
 }
 
 
 /*
- * cfs_up_write
- *   To release write-lock of the cfs_rw_semaphore
+ * up_write
+ *   To release write-lock of the rw_semaphore
  *
  * Arguments:
- *   rwsem:  pointer to the cfs_rw_semaphore_t structure
+ *   rwsem:  pointer to the struct rw_semaphore
  *
  * Return Value:
  *   N/A
@@ -484,11 +483,10 @@ static inline void cfs_up_read(cfs_rw_semaphore_t *s)
  *   N/A
  */
 
-static inline void cfs_up_write(cfs_rw_semaphore_t *s)
+static inline void up_write(struct rw_semaphore *s)
 {
-    ExReleaseResourceForThreadLite(
-                &(s->rwsem),
-                ExGetCurrentResourceThread());
+       ExReleaseResourceForThreadLite(&(s->rwsem),
+                                      ExGetCurrentResourceThread());
 }
 
 /*
@@ -502,37 +500,37 @@ static inline void cfs_up_write(cfs_rw_semaphore_t *s)
  */
 
 typedef struct {
-    cfs_spinlock_t guard;
-    int            count;
-} cfs_rwlock_t;
+       spinlock_t      guard;
+       int             count;
+} rwlock_t;
 
-void cfs_rwlock_init(cfs_rwlock_t * rwlock);
-void cfs_rwlock_fini(cfs_rwlock_t * rwlock);
+void rwlock_init(rwlock_t *rwlock);
+void cfs_rwlock_fini(rwlock_t *rwlock);
 
-void cfs_read_lock(cfs_rwlock_t * rwlock);
-void cfs_read_unlock(cfs_rwlock_t * rwlock);
-void cfs_write_lock(cfs_rwlock_t * rwlock);
-void cfs_write_unlock(cfs_rwlock_t * rwlock);
+void read_lock(rwlock_t *rwlock);
+void read_unlock(rwlock_t *rwlock);
+void write_lock(rwlock_t *rwlock);
+void write_unlock(rwlock_t *rwlock);
 
-#define cfs_write_lock_irqsave(l, f)     do {f = 0; cfs_write_lock(l);} while(0)
-#define cfs_write_unlock_irqrestore(l, f)   do {cfs_write_unlock(l);} while(0)
-#define cfs_read_lock_irqsave(l, f         do {f=0; cfs_read_lock(l);} while(0)
-#define cfs_read_unlock_irqrestore(l, f)    do {cfs_read_unlock(l);} while(0)
+#define write_lock_irqsave(l, f)       do { f = 0; write_lock(l); } while (0)
+#define write_unlock_irqrestore(l, f)  do { write_unlock(l); } while (0)
+#define read_lock_irqsave(l, f)                do { f = 0; read_lock(l); } while (0)
+#define read_unlock_irqrestore(l, f)   do { read_unlock(l); } while (0)
 
-#define cfs_write_lock_bh   cfs_write_lock
-#define cfs_write_unlock_bh cfs_write_unlock
+#define write_lock_bh          write_lock
+#define write_unlock_bh        write_unlock
 
-typedef struct cfs_lock_class_key {
-        int foo;
-} cfs_lock_class_key_t;
+struct lock_class_key {
+       int foo;
+};
 
-#define cfs_lockdep_set_class(lock, class) do {} while(0)
+#define lockdep_set_class(lock, class) do {} while (0)
 
-static inline void cfs_lockdep_off(void)
+static inline void lockdep_off(void)
 {
 }
 
-static inline void cfs_lockdep_on(void)
+static inline void lockdep_on(void)
 {
 }
 
@@ -544,38 +542,35 @@ static inline void cfs_lockdep_on(void)
  * - __up(x)
  */
 
-typedef struct cfs_semaphore {
+struct semaphore {
        KSEMAPHORE sem;
-} cfs_semaphore_t;
+};
 
-static inline void cfs_sema_init(cfs_semaphore_t *s, int val)
+static inline void sema_init(struct semaphore *s, int val)
 {
        KeInitializeSemaphore(&s->sem, val, val);
 }
 
-static inline void __down(cfs_semaphore_t *s)
+static inline void __down(struct semaphore *s)
 {
-   KeWaitForSingleObject( &(s->sem), Executive,
-                          KernelMode, FALSE, NULL );
+       KeWaitForSingleObject(&(s->sem), Executive, KernelMode, FALSE, NULL);
 
 }
-static inline void __up(cfs_semaphore_t *s)
+static inline void __up(struct semaphore *s)
 {
        KeReleaseSemaphore(&s->sem, 0, 1, FALSE);
 }
 
-static inline int down_trylock(cfs_semaphore_t *s)
+static inline int down_trylock(struct semaphore *s)
 {
-    LARGE_INTEGER  timeout = {0};
-    NTSTATUS status =
-        KeWaitForSingleObject( &(s->sem), Executive,
-                               KernelMode, FALSE, &timeout);
+       LARGE_INTEGER  timeout = {0};
+       NTSTATUS status = KeWaitForSingleObject(&(s->sem), Executive,
+                                               KernelMode, FALSE, &timeout);
 
-    if (status == STATUS_SUCCESS) {
-        return 0;
-    }
+       if (status == STATUS_SUCCESS)
+               return 0;
 
-    return 1;
+       return 1;
 }
 
 /*
@@ -587,9 +582,9 @@ static inline int down_trylock(cfs_semaphore_t *s)
  * - mutex_down(x)
  */
 
-typedef struct cfs_semaphore cfs_mutex_t;
+#define mutex semaphore
 
-#define CFS_DECLARE_MUTEX(x) cfs_mutex_t x
+#define CFS_DECLARE_MUTEX(x) struct mutex x
 
 /*
  * init_mutex
@@ -604,10 +599,10 @@ typedef struct cfs_semaphore cfs_mutex_t;
  * Notes:
  *   N/A
  */
-#define cfs_mutex_init cfs_init_mutex
-static inline void cfs_init_mutex(cfs_mutex_t *mutex)
+#define mutex_init cfs_init_mutex
+static inline void cfs_init_mutex(struct mutex *mutex)
 {
-    cfs_sema_init(mutex, 1);
+       sema_init(mutex, 1);
 }
 
 /*
@@ -624,22 +619,22 @@ static inline void cfs_init_mutex(cfs_mutex_t *mutex)
  *   N/A
  */
 
-static inline void cfs_mutex_down(cfs_mutex_t *mutex)
+static inline void cfs_mutex_down(struct mutex *mutex)
 {
-    __down(mutex);
+       __down(mutex);
 }
 
-static inline int cfs_mutex_down_interruptible(cfs_mutex_t *mutex)
+static inline int cfs_mutex_down_interruptible(struct mutex *mutex)
 {
-    __down(mutex);
-    return 0;
+       __down(mutex);
+       return 0;
 }
 
-#define cfs_mutex_lock(m)         cfs_mutex_down(m)
-#define cfs_mutex_trylock(s)      down_trylock(s)
-#define cfs_mutex_lock_nested(m)  cfs_mutex_down(m)
-#define cfs_down(m)               cfs_mutex_down(m)
-#define cfs_down_interruptible(m) cfs_mutex_down_interruptible(m)
+#define mutex_lock(m)          cfs_mutex_down(m)
+#define mutex_trylock(s)       down_trylock(s)
+#define mutex_lock_nested(m)   cfs_mutex_down(m)
+#define down(m)                        cfs_mutex_down(m)
+#define down_interruptible(m)  cfs_mutex_down_interruptible(m)
 
 /*
  * mutex_up
@@ -655,13 +650,13 @@ static inline int cfs_mutex_down_interruptible(cfs_mutex_t *mutex)
  *   N/A
  */
 
-static inline void cfs_mutex_up(cfs_mutex_t *mutex)
+static inline void cfs_mutex_up(struct mutex *mutex)
 {
-    __up(mutex);
+       __up(mutex);
 }
 
-#define cfs_mutex_unlock(m) cfs_mutex_up(m)
-#define cfs_up(m)           cfs_mutex_up(m)
+#define mutex_unlock(m)                cfs_mutex_up(m)
+#define up(m)                  cfs_mutex_up(m)
 
 /*
  * init_mutex_locked
@@ -677,13 +672,13 @@ static inline void cfs_mutex_up(cfs_mutex_t *mutex)
  *   N/A
  */
 
-static inline void cfs_init_mutex_locked(cfs_mutex_t *mutex)
+static inline void cfs_init_mutex_locked(struct mutex *mutex)
 {
-    cfs_init_mutex(mutex);
-    cfs_mutex_down(mutex);
+       cfs_init_mutex(mutex);
+       cfs_mutex_down(mutex);
 }
 
-static inline void cfs_mutex_destroy(cfs_mutex_t *mutex)
+static inline void mutex_destroy(struct mutex *mutex)
 {
 }
 
@@ -695,9 +690,9 @@ static inline void cfs_mutex_destroy(cfs_mutex_t *mutex)
  * - wait_for_completion(c)
  */
 
-typedef struct {
+struct completion {
        event_t  event;
-} cfs_completion_t;
+};
 
 
 /*
@@ -714,7 +709,7 @@ typedef struct {
  *   N/A
  */
 
-static inline void cfs_init_completion(cfs_completion_t *c)
+static inline void init_completion(struct completion *c)
 {
        cfs_init_event(&(c->event), 1, FALSE);
 }
@@ -734,7 +729,7 @@ static inline void cfs_init_completion(cfs_completion_t *c)
  *   N/A
  */
 
-static inline void cfs_complete(cfs_completion_t *c)
+static inline void complete(struct completion *c)
 {
        cfs_wake_event(&(c->event));
 }
@@ -754,17 +749,16 @@ static inline void cfs_complete(cfs_completion_t *c)
  *   N/A
  */
 
-static inline void cfs_wait_for_completion(cfs_completion_t *c)
+static inline void wait_for_completion(struct completion *c)
 {
-    cfs_wait_event_internal(&(c->event), 0);
+       cfs_wait_event_internal(&(c->event), 0);
 }
 
-static inline int cfs_wait_for_completion_interruptible(cfs_completion_t *c)
+static inline int wait_for_completion_interruptible(struct completion *c)
 {
-    cfs_wait_event_internal(&(c->event), 0);
-    return 0;
+       cfs_wait_event_internal(&(c->event), 0);
+       return 0;
 }
 
-#else  /* !__KERNEL__ */
 #endif /* !__KERNEL__ */
 #endif
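
On Windows the IRQL is stashed in the lock itself, so the flags argument to
spin_lock_irqsave() is write-only (it is just set to 0); the calling pattern
still mirrors Linux. A hypothetical sketch, assuming the lock was set up
earlier with spin_lock_init():

	static spinlock_t state_lock;

	void state_update(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&state_lock, flags);	/* flags just becomes 0 */
		/* ... critical section at DISPATCH_LEVEL ... */
		spin_unlock_irqrestore(&state_lock, flags);
	}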
index 619efc9..16112b2 100644 (file)
@@ -106,42 +106,42 @@ typedef struct cfs_page {
 
 /* Make it prettier to test the above... */
 #define UnlockPage(page)        unlock_page(page)
-#define Page_Uptodate(page)     cfs_test_bit(PG_uptodate, &(page)->flags)
-#define SetPageUptodate(page) \
+#define Page_Uptodate(page)     test_bit(PG_uptodate, &(page)->flags)
+#define SetPageUptodate(page)                                          \
        do {                                                            \
                arch_set_page_uptodate(page);                           \
-               cfs_set_bit(PG_uptodate, &(page)->flags);               \
+               set_bit(PG_uptodate, &(page)->flags);                   \
        } while (0)
-#define ClearPageUptodate(page) cfs_clear_bit(PG_uptodate, &(page)->flags)
-#define PageDirty(page)         cfs_test_bit(PG_dirty, &(page)->flags)
-#define SetPageDirty(page)      cfs_set_bit(PG_dirty, &(page)->flags)
-#define ClearPageDirty(page)    cfs_clear_bit(PG_dirty, &(page)->flags)
-#define PageLocked(page)        cfs_test_bit(PG_locked, &(page)->flags)
-#define LockPage(page)          cfs_set_bit(PG_locked, &(page)->flags)
-#define TryLockPage(page)       cfs_test_and_set_bit(PG_locked, &(page)->flags)
-#define PageChecked(page)       cfs_test_bit(PG_checked, &(page)->flags)
-#define SetPageChecked(page)    cfs_set_bit(PG_checked, &(page)->flags)
-#define ClearPageChecked(page)  cfs_clear_bit(PG_checked, &(page)->flags)
-#define PageLaunder(page)       cfs_test_bit(PG_launder, &(page)->flags)
-#define SetPageLaunder(page)    cfs_set_bit(PG_launder, &(page)->flags)
-#define ClearPageLaunder(page)  cfs_clear_bit(PG_launder, &(page)->flags)
-#define ClearPageArch1(page)    cfs_clear_bit(PG_arch_1, &(page)->flags)
-
-#define PageError(page)                cfs_test_bit(PG_error, &(page)->flags)
-#define SetPageError(page)     cfs_set_bit(PG_error, &(page)->flags)
-#define ClearPageError(page)   cfs_clear_bit(PG_error, &(page)->flags)
-#define PageReferenced(page)    cfs_test_bit(PG_referenced, &(page)->flags)
-#define SetPageReferenced(page) cfs_set_bit(PG_referenced, &(page)->flags)
-#define ClearPageReferenced(page) cfs_clear_bit(PG_referenced, &(page)->flags)
-
-#define PageActive(page)        cfs_test_bit(PG_active, &(page)->flags)
-#define SetPageActive(page)     cfs_set_bit(PG_active, &(page)->flags)
-#define ClearPageActive(page)   cfs_clear_bit(PG_active, &(page)->flags)
-
-#define PageWriteback(page)    cfs_test_bit(PG_writeback, &(page)->flags)
-#define TestSetPageWriteback(page) cfs_test_and_set_bit(PG_writeback,  \
+#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
+#define PageDirty(page)        test_bit(PG_dirty, &(page)->flags)
+#define SetPageDirty(page)     set_bit(PG_dirty, &(page)->flags)
+#define ClearPageDirty(page)   clear_bit(PG_dirty, &(page)->flags)
+#define PageLocked(page)       test_bit(PG_locked, &(page)->flags)
+#define LockPage(page)         set_bit(PG_locked, &(page)->flags)
+#define TryLockPage(page)      test_and_set_bit(PG_locked, &(page)->flags)
+#define PageChecked(page)      test_bit(PG_checked, &(page)->flags)
+#define SetPageChecked(page)   set_bit(PG_checked, &(page)->flags)
+#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)
+#define PageLaunder(page)      test_bit(PG_launder, &(page)->flags)
+#define SetPageLaunder(page)   set_bit(PG_launder, &(page)->flags)
+#define ClearPageLaunder(page) clear_bit(PG_launder, &(page)->flags)
+#define ClearPageArch1(page)   clear_bit(PG_arch_1, &(page)->flags)
+
+#define PageError(page)        test_bit(PG_error, &(page)->flags)
+#define SetPageError(page)     set_bit(PG_error, &(page)->flags)
+#define ClearPageError(page)   clear_bit(PG_error, &(page)->flags)
+#define PageReferenced(page)   test_bit(PG_referenced, &(page)->flags)
+#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
+#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)
+
+#define PageActive(page)        test_bit(PG_active, &(page)->flags)
+#define SetPageActive(page)     set_bit(PG_active, &(page)->flags)
+#define ClearPageActive(page)   clear_bit(PG_active, &(page)->flags)
+
+#define PageWriteback(page)    test_bit(PG_writeback, &(page)->flags)
+#define TestSetPageWriteback(page) test_and_set_bit(PG_writeback,      \
                                                        &(page)->flags)
-#define TestClearPageWriteback(page) cfs_test_and_clear_bit(PG_writeback, \
+#define TestClearPageWriteback(page) test_and_clear_bit(PG_writeback,  \
                                                        &(page)->flags)
 
 #define __GFP_FS    (1)
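
The test_and_* page-flag macros return the previous bit value, which is what
makes TryLockPage() usable as a try-lock. A hypothetical sketch (page is an
assumed cfs_page_t pointer):

	/* old PG_locked value 0 means this caller now owns the page lock */
	if (!TryLockPage(page)) {
		/* ... page is ours ... */
		UnlockPage(page);
	}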
index 4cf73c6..5ac1fc0 100644 (file)
@@ -319,7 +319,7 @@ struct seq_file {
        size_t count;
        loff_t index;
        u32    version;
-       cfs_mutex_t lock;
+       struct mutex            lock;
        const struct seq_operations *op;
        void *private;
 };
@@ -409,12 +409,11 @@ typedef int cfs_task_state_t;
 #define CFS_WAITLINK_MAGIC  'CWLM'
 
 typedef struct cfs_waitq {
+       unsigned int            magic;
+       unsigned int            flags;
 
-    unsigned int            magic;
-    unsigned int            flags;
-
-    cfs_spinlock_t          guard;
-    cfs_list_t              waiters;
+       spinlock_t              guard;
+       cfs_list_t              waiters;
 
 } cfs_waitq_t;
 
@@ -613,17 +612,15 @@ static inline void task_unlock(cfs_task_t *t)
 #define TASKSLT_MAGIC  'TSLT'   /* Task Slot */
 
 typedef struct _TASK_MAN {
+       ULONG           Magic;          /* Magic and Flags */
+       ULONG           Flags;
 
-    ULONG           Magic;      /* Magic and Flags */
-    ULONG           Flags;
-
-    cfs_spinlock_t  Lock;       /* Protection lock */
-
-    cfs_mem_cache_t *slab; /* Memory slab for task slot */
+       spinlock_t      Lock;           /* Protection lock */
 
-    ULONG           NumOfTasks; /* Total tasks (threads) */
-    LIST_ENTRY      TaskList;   /* List of task slots */
+       cfs_mem_cache_t *slab;          /* Memory slab for task slot */
 
+       ULONG           NumOfTasks;     /* Total tasks (threads) */
+       LIST_ENTRY      TaskList;       /* List of task slots */
 } TASK_MAN, *PTASK_MAN;
 
 typedef struct _TASK_SLOT {
index 7f9e45e..d014d7f 100644 (file)
@@ -232,11 +232,11 @@ typedef struct _KS_TSDU_MDL {
 } KS_TSDU_MDL, *PKS_TSDU_MDL;
 
 typedef struct ks_engine_mgr {
-    cfs_spinlock_t          lock;
-    int                     stop;
-    event_t                 exit;
-    event_t                 start;
-    cfs_list_t              list;
+       spinlock_t      lock;
+       int             stop;
+       event_t         exit;
+       event_t         start;
+       cfs_list_t      list;
 } ks_engine_mgr_t;
 
 typedef struct ks_engine_slot {
@@ -248,19 +248,19 @@ typedef struct ks_engine_slot {
 } ks_engine_slot_t;
 
 typedef struct _KS_TSDUMGR {
-    cfs_list_t              TsduList;
-    ULONG                   NumOfTsdu;
-    ULONG                   TotalBytes;
-    KEVENT                  Event;
-    cfs_spinlock_t          Lock;
-    ks_engine_slot_t        Slot;
-    ULONG                   Payload;
-    int                     Busy:1;
-    int                     OOB:1;
+       cfs_list_t              TsduList;
+       ULONG                   NumOfTsdu;
+       ULONG                   TotalBytes;
+       KEVENT                  Event;
+       spinlock_t              Lock;
+       ks_engine_slot_t        Slot;
+       ULONG                   Payload;
+       int                     Busy:1;
+       int                     OOB:1;
 } KS_TSDUMGR, *PKS_TSDUMGR;
 
-#define ks_lock_tsdumgr(mgr)   cfs_spin_lock(&((mgr)->Lock))
-#define ks_unlock_tsdumgr(mgr) cfs_spin_unlock(&((mgr)->Lock))
+#define ks_lock_tsdumgr(mgr)   spin_lock(&((mgr)->Lock))
+#define ks_unlock_tsdumgr(mgr) spin_unlock(&((mgr)->Lock))
 
 typedef struct _KS_CHAIN {
     KS_TSDUMGR          Normal;      /* normal queue */
@@ -423,7 +423,7 @@ struct socket {
         ulong                       kstc_magic;      /* Magic & Flags */
         ulong                       kstc_flags;
 
-        cfs_spinlock_t              kstc_lock;       /* serialise lock*/
+       spinlock_t                  kstc_lock;       /* serialise lock */
         void *                      kstc_conn;       /* ks_conn_t */
 
         ks_tconn_type_t             kstc_type;          /* tdi connection Type */
@@ -614,15 +614,14 @@ typedef struct ks_addr_slot {
 } ks_addr_slot_t;
 
 typedef struct {
+       /*
+        * Tdi client information
+        */
 
-    /*
-     * Tdi client information
-     */
+       UNICODE_STRING  ksnd_client_name;       /* tdi client module name */
+       HANDLE          ksnd_pnp_handle;        /* the handle for pnp changes */
 
-    UNICODE_STRING        ksnd_client_name; /* tdi client module name */
-    HANDLE                ksnd_pnp_handle;  /* the handle for pnp changes */
-
-    cfs_spinlock_t        ksnd_addrs_lock;  /* serialize ip address list access */
+       spinlock_t      ksnd_addrs_lock;        /* serialize ip address list */
     LIST_ENTRY            ksnd_addrs_list;  /* list of the ip addresses */
     int                   ksnd_naddrs;      /* number of the ip addresses */
 
@@ -634,15 +633,15 @@ typedef struct {
 
     TDI_PROVIDER_INFO     ksnd_provider;        /* tdi tcp/ip provider's information */
 
-    cfs_spinlock_t        ksnd_tconn_lock;      /* tdi connections access serialise */
+       spinlock_t      ksnd_tconn_lock;        /* tdi connections access lock*/
+
+       int             ksnd_ntconns;           /* number of tconns in list */
+       cfs_list_t      ksnd_tconns;            /* tdi connections list */
+       cfs_mem_cache_t *ksnd_tconn_slab;       /* ks_tconn_t allocation slabs*/
+       event_t         ksnd_tconn_exit;        /* event signal by last tconn */
 
-    int                   ksnd_ntconns;         /* number of tconns attached in list */
-    cfs_list_t            ksnd_tconns;          /* tdi connections list */
-    cfs_mem_cache_t *     ksnd_tconn_slab;      /* slabs for ks_tconn_t allocations */
-    event_t               ksnd_tconn_exit;      /* exit event to be signaled by the last tconn */
+       spinlock_t      ksnd_tsdu_lock;         /* tsdu access serialise */
 
-    cfs_spinlock_t        ksnd_tsdu_lock;       /* tsdu access serialise */
-        
     int                   ksnd_ntsdus;          /* number of tsdu buffers allocated */
     ulong                 ksnd_tsdu_size;       /* the size of a single tsdu buffer */
     cfs_mem_cache_t       *ksnd_tsdu_slab;       /* slab cache for tsdu buffer allocation */
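
The KS_TSDUMGR fields are only consistent under the embedded spinlock, which
the accessor macros above now take directly. A hypothetical sketch (nbytes is
an assumed payload size):

	ks_lock_tsdumgr(mgr);		/* spin_lock(&mgr->Lock) */
	mgr->NumOfTsdu++;
	mgr->TotalBytes += nbytes;
	ks_unlock_tsdumgr(mgr);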
index 06c028d..c3f0b56 100644 (file)
@@ -57,23 +57,23 @@ struct cfs_zone_nob {
 };
 
 static struct cfs_zone_nob      cfs_zone_nob;
-static spinlock_t               cfs_zone_guard;
+static spinlock_t              cfs_zone_guard;
 
 cfs_mem_cache_t *mem_cache_find(const char *name, size_t objsize)
 {
-        cfs_mem_cache_t         *walker = NULL;
+       cfs_mem_cache_t         *walker = NULL;
 
-        LASSERT(cfs_zone_nob.z_nob != NULL);
+       LASSERT(cfs_zone_nob.z_nob != NULL);
 
-        spin_lock(&cfs_zone_guard);
-        list_for_each_entry(walker, cfs_zone_nob.z_nob, mc_link) {
-                if (!strcmp(walker->mc_name, name) && \
-                    walker->mc_size == objsize)
-                        break;
-        }
-        spin_unlock(&cfs_zone_guard);
+       spin_lock(&cfs_zone_guard);
+       list_for_each_entry(walker, cfs_zone_nob.z_nob, mc_link) {
+               if (!strcmp(walker->mc_name, name) && \
+                   walker->mc_size == objsize)
+                       break;
+       }
+       spin_unlock(&cfs_zone_guard);
 
-        return walker;
+       return walker;
 }
 
 /*
@@ -270,18 +270,18 @@ static void raw_page_finish(struct xnu_raw_page *pg)
 
 void raw_page_death_row_clean(void)
 {
-        struct xnu_raw_page *pg;
+       struct xnu_raw_page *pg;
 
-        spin_lock(&page_death_row_phylax);
-        while (!list_empty(&page_death_row)) {
-                pg = container_of(page_death_row.next,
-                                  struct xnu_raw_page, link);
-                list_del(&pg->link);
-                spin_unlock(&page_death_row_phylax);
-                raw_page_finish(pg);
-                spin_lock(&page_death_row_phylax);
-        }
-        spin_unlock(&page_death_row_phylax);
+       spin_lock(&page_death_row_phylax);
+       while (!list_empty(&page_death_row)) {
+               pg = container_of(page_death_row.next,
+                                 struct xnu_raw_page, link);
+               list_del(&pg->link);
+               spin_unlock(&page_death_row_phylax);
+               raw_page_finish(pg);
+               spin_lock(&page_death_row_phylax);
+       }
+       spin_unlock(&page_death_row_phylax);
 }
 
 /* Free a "page" */
@@ -289,20 +289,20 @@ void free_raw_page(struct xnu_raw_page *pg)
 {
        if (!atomic_dec_and_test(&pg->count))
                return;
-        /*
-         * kmem_free()->vm_map_remove()->vm_map_delete()->lock_write() may
-         * block. (raw_page_done()->upl_abort() can block too) On the other
-         * hand, cfs_free_page() may be called in non-blockable context. To
-         * work around this, park pages on global list when cannot block.
-         */
-        if (get_preemption_level() > 0) {
-                spin_lock(&page_death_row_phylax);
-                list_add(&pg->link, &page_death_row);
-                spin_unlock(&page_death_row_phylax);
-        } else {
-                raw_page_finish(pg);
-                raw_page_death_row_clean();
-        }
+       /*
+        * kmem_free()->vm_map_remove()->vm_map_delete()->lock_write() may
+        * block. (raw_page_done()->upl_abort() can block too) On the other
+        * hand, cfs_free_page() may be called in non-blockable context. To
+        * work around this, park pages on global list when cannot block.
+        */
+       if (get_preemption_level() > 0) {
+               spin_lock(&page_death_row_phylax);
+               list_add(&pg->link, &page_death_row);
+               spin_unlock(&page_death_row_phylax);
+       } else {
+               raw_page_finish(pg);
+               raw_page_death_row_clean();
+       }
 }
 
 cfs_page_t *cfs_alloc_page(u_int32_t flags)
@@ -471,22 +471,22 @@ int cfs_mem_init(void)
 
                 cfs_zone_nob.z_nob = nob->z_nob;
         }
-        spin_lock_init(&cfs_zone_guard);
+       spin_lock_init(&cfs_zone_guard);
 #endif
-        CFS_INIT_LIST_HEAD(&page_death_row);
-        spin_lock_init(&page_death_row_phylax);
-        raw_page_cache = cfs_mem_cache_create("raw-page", CFS_PAGE_SIZE, 0, 0);
-        return 0;
+       CFS_INIT_LIST_HEAD(&page_death_row);
+       spin_lock_init(&page_death_row_phylax);
+       raw_page_cache = cfs_mem_cache_create("raw-page", CFS_PAGE_SIZE, 0, 0);
+       return 0;
 }
 
 void cfs_mem_fini(void)
 {
-        raw_page_death_row_clean();
-        spin_lock_done(&page_death_row_phylax);
-        cfs_mem_cache_destroy(raw_page_cache);
+       raw_page_death_row_clean();
+       spin_lock_done(&page_death_row_phylax);
+       cfs_mem_cache_destroy(raw_page_cache);
 
-#if     CFS_INDIVIDUAL_ZONE
-        cfs_zone_nob.z_nob = NULL;
-        spin_lock_done(&cfs_zone_guard);
+#if CFS_INDIVIDUAL_ZONE
+       cfs_zone_nob.z_nob = NULL;
+       spin_lock_done(&cfs_zone_guard);
 #endif
 }
index 494475d..473edf7 100644 (file)
@@ -85,7 +85,7 @@ int __cfs_fail_check_set(__u32 id, __u32 value, int set)
                 int count = cfs_atomic_inc_return(&cfs_fail_count);
 
                 if (count >= cfs_fail_val) {
-                        cfs_set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
+                       set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
                         cfs_atomic_set(&cfs_fail_count, 0);
                        /* we lost the race to increase */
                         if (count > cfs_fail_val)
@@ -95,9 +95,9 @@ int __cfs_fail_check_set(__u32 id, __u32 value, int set)
 
         if ((set == CFS_FAIL_LOC_ORSET || set == CFS_FAIL_LOC_RESET) &&
             (value & CFS_FAIL_ONCE))
-                cfs_set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
+               set_bit(CFS_FAIL_ONCE_BIT, &cfs_fail_loc);
         /* Lost race to set CFS_FAILED_BIT. */
-        if (cfs_test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) {
+       if (test_and_set_bit(CFS_FAILED_BIT, &cfs_fail_loc)) {
                 /* If CFS_FAIL_ONCE is valid, only one process can fail,
                  * otherwise multi-process can fail at the same time. */
                 if (cfs_fail_loc & CFS_FAIL_ONCE)
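
The race handling above relies on test_and_set_bit() atomically returning the
bit's previous value. A minimal standalone illustration (hypothetical names):

	static unsigned long once_mask;

	void run_once(void)
	{
		/* only the caller that observes the old value 0 takes the branch */
		if (!test_and_set_bit(0, &once_mask))
			do_setup();	/* do_setup(): hypothetical, runs exactly once */
	}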
index cb260a4..37308a7 100644 (file)
@@ -126,31 +126,31 @@ cfs_hash_nl_unlock(cfs_hash_lock_t *lock, int exclusive) {}
 static inline void
 cfs_hash_spin_lock(cfs_hash_lock_t *lock, int exclusive)
 {
-        cfs_spin_lock(&lock->spin);
+       spin_lock(&lock->spin);
 }
 
 static inline void
 cfs_hash_spin_unlock(cfs_hash_lock_t *lock, int exclusive)
 {
-        cfs_spin_unlock(&lock->spin);
+       spin_unlock(&lock->spin);
 }
 
 static inline void
 cfs_hash_rw_lock(cfs_hash_lock_t *lock, int exclusive)
 {
-        if (!exclusive)
-                cfs_read_lock(&lock->rw);
-        else
-                cfs_write_lock(&lock->rw);
+       if (!exclusive)
+               read_lock(&lock->rw);
+       else
+               write_lock(&lock->rw);
 }
 
 static inline void
 cfs_hash_rw_unlock(cfs_hash_lock_t *lock, int exclusive)
 {
-        if (!exclusive)
-                cfs_read_unlock(&lock->rw);
-        else
-                cfs_write_unlock(&lock->rw);
+       if (!exclusive)
+               read_unlock(&lock->rw);
+       else
+               write_unlock(&lock->rw);
 }
 
 /** No lock hash */
@@ -210,15 +210,15 @@ static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops =
 static void
 cfs_hash_lock_setup(cfs_hash_t *hs)
 {
-        if (cfs_hash_with_no_lock(hs)) {
-                hs->hs_lops = &cfs_hash_nl_lops;
+       if (cfs_hash_with_no_lock(hs)) {
+               hs->hs_lops = &cfs_hash_nl_lops;
 
-        } else if (cfs_hash_with_no_bktlock(hs)) {
-                hs->hs_lops = &cfs_hash_nbl_lops;
-                cfs_spin_lock_init(&hs->hs_lock.spin);
+       } else if (cfs_hash_with_no_bktlock(hs)) {
+               hs->hs_lops = &cfs_hash_nbl_lops;
+               spin_lock_init(&hs->hs_lock.spin);
 
-        } else if (cfs_hash_with_rehash(hs)) {
-                cfs_rwlock_init(&hs->hs_lock.rw);
+       } else if (cfs_hash_with_rehash(hs)) {
+               rwlock_init(&hs->hs_lock.rw);
 
                 if (cfs_hash_with_rw_bktlock(hs))
                         hs->hs_lops = &cfs_hash_bkt_rw_lops;
@@ -506,12 +506,12 @@ cfs_hash_bd_dep_record(cfs_hash_t *hs, cfs_hash_bd_t *bd, int dep_cur)
                    max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
                 return;
 
-        cfs_spin_lock(&hs->hs_dep_lock);
-        hs->hs_dep_max  = dep_cur;
-        hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
-        hs->hs_dep_off  = bd->bd_offset;
-        hs->hs_dep_bits = hs->hs_cur_bits;
-        cfs_spin_unlock(&hs->hs_dep_lock);
+       spin_lock(&hs->hs_dep_lock);
+       hs->hs_dep_max  = dep_cur;
+       hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
+       hs->hs_dep_off  = bd->bd_offset;
+       hs->hs_dep_bits = hs->hs_cur_bits;
+       spin_unlock(&hs->hs_dep_lock);
 
        cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
 # endif
@@ -936,14 +936,14 @@ cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts,
                     cfs_hash_with_no_bktlock(hs))
                         continue;
 
-                if (cfs_hash_with_rw_bktlock(hs))
-                        cfs_rwlock_init(&new_bkts[i]->hsb_lock.rw);
-                else if (cfs_hash_with_spin_bktlock(hs))
-                        cfs_spin_lock_init(&new_bkts[i]->hsb_lock.spin);
-                else
-                        LBUG(); /* invalid use-case */
-        }
-        return new_bkts;
+               if (cfs_hash_with_rw_bktlock(hs))
+                       rwlock_init(&new_bkts[i]->hsb_lock.rw);
+               else if (cfs_hash_with_spin_bktlock(hs))
+                       spin_lock_init(&new_bkts[i]->hsb_lock.spin);
+               else
+                       LBUG(); /* invalid use-case */
+       }
+       return new_bkts;
 }
 
 /**
@@ -960,45 +960,45 @@ static int cfs_hash_rehash_worker(cfs_workitem_t *wi);
 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
 static int cfs_hash_dep_print(cfs_workitem_t *wi)
 {
-        cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
-        int         dep;
-        int         bkt;
-        int         off;
-        int         bits;
-
-        cfs_spin_lock(&hs->hs_dep_lock);
-        dep  = hs->hs_dep_max;
-        bkt  = hs->hs_dep_bkt;
-        off  = hs->hs_dep_off;
-        bits = hs->hs_dep_bits;
-        cfs_spin_unlock(&hs->hs_dep_lock);
-
-        LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
-                      hs->hs_name, bits, dep, bkt, off);
-        cfs_spin_lock(&hs->hs_dep_lock);
-        hs->hs_dep_bits = 0; /* mark as workitem done */
-        cfs_spin_unlock(&hs->hs_dep_lock);
-        return 0;
+       cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
+       int         dep;
+       int         bkt;
+       int         off;
+       int         bits;
+
+       spin_lock(&hs->hs_dep_lock);
+       dep  = hs->hs_dep_max;
+       bkt  = hs->hs_dep_bkt;
+       off  = hs->hs_dep_off;
+       bits = hs->hs_dep_bits;
+       spin_unlock(&hs->hs_dep_lock);
+
+       LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
+                     hs->hs_name, bits, dep, bkt, off);
+       spin_lock(&hs->hs_dep_lock);
+       hs->hs_dep_bits = 0; /* mark as workitem done */
+       spin_unlock(&hs->hs_dep_lock);
+       return 0;
 }
 
 static void cfs_hash_depth_wi_init(cfs_hash_t *hs)
 {
-       cfs_spin_lock_init(&hs->hs_dep_lock);
+       spin_lock_init(&hs->hs_dep_lock);
        cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
 }
 
 static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
 {
        if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
-                return;
+               return;
 
-        cfs_spin_lock(&hs->hs_dep_lock);
-        while (hs->hs_dep_bits != 0) {
-                cfs_spin_unlock(&hs->hs_dep_lock);
-                cfs_cond_resched();
-                cfs_spin_lock(&hs->hs_dep_lock);
-        }
-        cfs_spin_unlock(&hs->hs_dep_lock);
+       spin_lock(&hs->hs_dep_lock);
+       while (hs->hs_dep_bits != 0) {
+               spin_unlock(&hs->hs_dep_lock);
+               cfs_cond_resched();
+               spin_lock(&hs->hs_dep_lock);
+       }
+       spin_unlock(&hs->hs_dep_lock);
 }
 
 #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
@@ -2107,7 +2107,7 @@ int cfs_hash_debug_str(cfs_hash_t *hs, char *str, int size)
                 if (maxdep < bd.bd_bucket->hsb_depmax) {
                         maxdep  = bd.bd_bucket->hsb_depmax;
 #ifdef __KERNEL__
-                        maxdepb = cfs_ffz(~maxdep);
+                       maxdepb = ffz(~maxdep);
 #endif
                 }
                 total += bd.bd_bucket->hsb_count;
index 1e572f4..d834630 100644 (file)
@@ -192,7 +192,7 @@ struct kkuc_reg {
 };
 static cfs_list_t kkuc_groups[KUC_GRP_MAX+1] = {};
 /* Protect message sending against remove and adds */
-static CFS_DECLARE_RWSEM(kg_sem);
+static DECLARE_RWSEM(kg_sem);
 
 /** Add a receiver to a broadcast group
  * @param filp pipe to write into
@@ -221,11 +221,11 @@ int libcfs_kkuc_group_add(cfs_file_t *filp, int uid, int group, __u32 data)
         reg->kr_uid = uid;
         reg->kr_data = data;
 
-        cfs_down_write(&kg_sem);
+       down_write(&kg_sem);
         if (kkuc_groups[group].next == NULL)
                 CFS_INIT_LIST_HEAD(&kkuc_groups[group]);
         cfs_list_add(&reg->kr_chain, &kkuc_groups[group]);
-        cfs_up_write(&kg_sem);
+       up_write(&kg_sem);
 
         CDEBUG(D_KUC, "Added uid=%d fp=%p to group %d\n", uid, filp, group);
 
@@ -252,7 +252,7 @@ int libcfs_kkuc_group_rem(int uid, int group)
                 libcfs_kkuc_group_put(group, &lh);
         }
 
-        cfs_down_write(&kg_sem);
+       down_write(&kg_sem);
         cfs_list_for_each_entry_safe(reg, next, &kkuc_groups[group], kr_chain) {
                 if ((uid == 0) || (uid == reg->kr_uid)) {
                         cfs_list_del(&reg->kr_chain);
@@ -263,7 +263,7 @@ int libcfs_kkuc_group_rem(int uid, int group)
                         cfs_free(reg);
                 }
         }
-        cfs_up_write(&kg_sem);
+       up_write(&kg_sem);
 
         RETURN(0);
 }
@@ -275,7 +275,7 @@ int libcfs_kkuc_group_put(int group, void *payload)
         int rc = 0;
         ENTRY;
 
-        cfs_down_read(&kg_sem);
+       down_read(&kg_sem);
         cfs_list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
                 if (reg->kr_fp != NULL) {
                 rc = libcfs_kkuc_msg_put(reg->kr_fp, payload);
@@ -285,7 +285,7 @@ int libcfs_kkuc_group_put(int group, void *payload)
                         }
                 }
         }
-        cfs_up_read(&kg_sem);
+       up_read(&kg_sem);
 
         RETURN(rc);
 }
@@ -313,13 +313,13 @@ int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
         if (kkuc_groups[group].next == NULL)
                 RETURN(0);
 
-        cfs_down_read(&kg_sem);
+       down_read(&kg_sem);
         cfs_list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
                 if (reg->kr_fp != NULL) {
                         rc = cb_func(reg->kr_data, cb_arg);
                 }
         }
-        cfs_up_read(&kg_sem);
+       up_read(&kg_sem);
 
         RETURN(rc);
 }
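
kg_sem follows the usual reader/writer split: registration paths take
down_write() because they modify the group lists, while broadcast and
iteration paths take down_read() and may run concurrently. The same pattern in
miniature (struct item and its i_link member are hypothetical):

	static DECLARE_RWSEM(list_sem);
	static cfs_list_t items;

	void item_add(struct item *it)
	{
		down_write(&list_sem);		/* exclusive: list is modified */
		cfs_list_add(&it->i_link, &items);
		up_write(&list_sem);
	}

	void item_walk(void (*fn)(struct item *))
	{
		struct item *it;

		down_read(&list_sem);		/* shared: list only traversed */
		cfs_list_for_each_entry(it, &items, i_link)
			fn(it);
		up_read(&list_sem);
	}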
index a587e95..adf157f 100644 (file)
@@ -62,7 +62,7 @@ struct cfs_percpt_lock *
 cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
 {
        struct cfs_percpt_lock  *pcl;
-       cfs_spinlock_t          *lock;
+       spinlock_t              *lock;
        int                     i;
 
        /* NB: cptab can be NULL, pcl will be for HW CPUs on that case */
@@ -78,7 +78,7 @@ cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
        }
 
        cfs_percpt_for_each(lock, i, pcl->pcl_locks)
-               cfs_spin_lock_init(lock);
+               spin_lock_init(lock);
 
        return pcl;
 }
@@ -109,13 +109,13 @@ cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index)
        }
 
        if (likely(index != CFS_PERCPT_LOCK_EX)) {
-               cfs_spin_lock(pcl->pcl_locks[index]);
+               spin_lock(pcl->pcl_locks[index]);
                return;
        }
 
        /* exclusive lock request */
        for (i = 0; i < ncpt; i++) {
-               cfs_spin_lock(pcl->pcl_locks[i]);
+               spin_lock(pcl->pcl_locks[i]);
                if (i == 0) {
                        LASSERT(!pcl->pcl_locked);
                        /* nobody should take private lock after this
@@ -136,7 +136,7 @@ cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
        index = ncpt == 1 ? 0 : index;
 
        if (likely(index != CFS_PERCPT_LOCK_EX)) {
-               cfs_spin_unlock(pcl->pcl_locks[index]);
+               spin_unlock(pcl->pcl_locks[index]);
                return;
        }
 
@@ -145,7 +145,7 @@ cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
                        LASSERT(pcl->pcl_locked);
                        pcl->pcl_locked = 0;
                }
-               cfs_spin_unlock(pcl->pcl_locks[i]);
+               spin_unlock(pcl->pcl_locks[i]);
        }
 }
 CFS_EXPORT_SYMBOL(cfs_percpt_unlock);
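
cfs_percpt_lock() with CFS_PERCPT_LOCK_EX takes every per-CPU spinlock in
index order (CPU 0 first), so the exclusive path serializes against all
fast-path holders without needing a separate guard lock. Callers see a
two-mode API; a sketch with hypothetical critical sections:

	cfs_percpt_lock(pcl, cpt);		/* fast path: this CPU's lock only */
	/* ... update per-CPU state ... */
	cfs_percpt_unlock(pcl, cpt);

	cfs_percpt_lock(pcl, CFS_PERCPT_LOCK_EX);	/* slow path: all locks */
	/* ... update state shared across CPUs ... */
	cfs_percpt_unlock(pcl, CFS_PERCPT_LOCK_EX);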
index 3c5956e..fe0b63a 100644 (file)
 
 int oom_get_adj(struct task_struct *task, int scope)
 {
-
-        int oom_adj;
+       int oom_adj;
 #ifdef HAVE_OOMADJ_IN_SIG
-        unsigned long flags;
+       unsigned long flags;
 
-        spin_lock_irqsave(&task->sighand->siglock, flags);
-        oom_adj = task->signal->oom_adj;
-        task->signal->oom_adj = scope;
-        spin_unlock_irqrestore(&task->sighand->siglock, flags);
+       spin_lock_irqsave(&task->sighand->siglock, flags);
+       oom_adj = task->signal->oom_adj;
+       task->signal->oom_adj = scope;
+       spin_unlock_irqrestore(&task->sighand->siglock, flags);
 
 #else
-        oom_adj = task->oomkilladj;
-        task->oomkilladj = scope;
+       oom_adj = task->oomkilladj;
+       task->oomkilladj = scope;
 #endif
-        return oom_adj;
+       return oom_adj;
 }
 
 int cfs_create_thread(int (*fn)(void *),
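
oom_get_adj() above installs a new OOM adjustment and hands back the old one,
so callers can bracket allocation-sensitive work and restore the prior
setting. A hypothetical sketch (assuming the legacy OOM_DISABLE value of -17
on these kernels):

	int saved;

	saved = oom_get_adj(current, -17);	/* shield this task from the OOM killer */
	/* ... allocation-sensitive section ... */
	oom_get_adj(current, saved);		/* restore the previous adjustment */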
index 1df8542..aa9c377 100644 (file)
@@ -105,11 +105,11 @@ EXPORT_SYMBOL(cfs_waitq_add_exclusive);
 void
 cfs_waitq_add_exclusive_head(cfs_waitq_t *waitq, cfs_waitlink_t *link)
 {
-        unsigned long flags;
+       unsigned long flags;
 
-        spin_lock_irqsave(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
-        __add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
-        spin_unlock_irqrestore(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
+       spin_lock_irqsave(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
+       __add_wait_queue_exclusive(LINUX_WAITQ_HEAD(waitq), LINUX_WAITQ(link));
+       spin_unlock_irqrestore(&LINUX_WAITQ_HEAD(waitq)->lock, flags);
 }
 EXPORT_SYMBOL(cfs_waitq_add_exclusive_head);
 
index 411178c..95b55ce 100644 (file)
@@ -47,7 +47,7 @@ static unsigned int pages_factor[CFS_TCD_TYPE_MAX] = {
 
 char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
 
-cfs_rw_semaphore_t cfs_tracefile_sem;
+struct rw_semaphore cfs_tracefile_sem;
 
 int cfs_tracefile_init_arch()
 {
@@ -55,7 +55,7 @@ int cfs_tracefile_init_arch()
        int    j;
        struct cfs_trace_cpu_data *tcd;
 
-       cfs_init_rwsem(&cfs_tracefile_sem);
+       init_rwsem(&cfs_tracefile_sem);
 
        /* initialize trace_data */
        memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
@@ -70,7 +70,7 @@ int cfs_tracefile_init_arch()
 
        /* arch related info initialized */
        cfs_tcd_for_each(tcd, i, j) {
-               cfs_spin_lock_init(&tcd->tcd_lock);
+               spin_lock_init(&tcd->tcd_lock);
                tcd->tcd_pages_factor = pages_factor[i];
                tcd->tcd_type = i;
                tcd->tcd_cpu = j;
@@ -111,27 +111,27 @@ void cfs_tracefile_fini_arch()
                cfs_trace_data[i] = NULL;
        }
 
-       cfs_fini_rwsem(&cfs_tracefile_sem);
+       fini_rwsem(&cfs_tracefile_sem);
 }
 
 void cfs_tracefile_read_lock()
 {
-       cfs_down_read(&cfs_tracefile_sem);
+       down_read(&cfs_tracefile_sem);
 }
 
 void cfs_tracefile_read_unlock()
 {
-       cfs_up_read(&cfs_tracefile_sem);
+       up_read(&cfs_tracefile_sem);
 }
 
 void cfs_tracefile_write_lock()
 {
-       cfs_down_write(&cfs_tracefile_sem);
+       down_write(&cfs_tracefile_sem);
 }
 
 void cfs_tracefile_write_unlock()
 {
-       cfs_up_write(&cfs_tracefile_sem);
+       up_write(&cfs_tracefile_sem);
 }
 
 cfs_trace_buf_type_t cfs_trace_buf_idx_get()
@@ -153,28 +153,28 @@ cfs_trace_buf_type_t cfs_trace_buf_idx_get()
 int cfs_trace_lock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
 {
        __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
-        if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
-                cfs_spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
-        else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
-                cfs_spin_lock_bh(&tcd->tcd_lock);
-        else if (unlikely(walking))
-                cfs_spin_lock_irq(&tcd->tcd_lock);
-        else
-                cfs_spin_lock(&tcd->tcd_lock);
+       if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
+               spin_lock_irqsave(&tcd->tcd_lock, tcd->tcd_lock_flags);
+       else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
+               spin_lock_bh(&tcd->tcd_lock);
+       else if (unlikely(walking))
+               spin_lock_irq(&tcd->tcd_lock);
+       else
+               spin_lock(&tcd->tcd_lock);
        return 1;
 }
 
 void cfs_trace_unlock_tcd(struct cfs_trace_cpu_data *tcd, int walking)
 {
        __LASSERT(tcd->tcd_type < CFS_TCD_TYPE_MAX);
-        if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
-                cfs_spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
-        else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
-                cfs_spin_unlock_bh(&tcd->tcd_lock);
-        else if (unlikely(walking))
-                cfs_spin_unlock_irq(&tcd->tcd_lock);
-        else
-                cfs_spin_unlock(&tcd->tcd_lock);
+       if (tcd->tcd_type == CFS_TCD_TYPE_IRQ)
+               spin_unlock_irqrestore(&tcd->tcd_lock, tcd->tcd_lock_flags);
+       else if (tcd->tcd_type == CFS_TCD_TYPE_SOFTIRQ)
+               spin_unlock_bh(&tcd->tcd_lock);
+       else if (unlikely(walking))
+               spin_unlock_irq(&tcd->tcd_lock);
+       else
+               spin_unlock(&tcd->tcd_lock);
 }
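
The three lock flavours above match the context that owns each trace buffer (hard IRQ, softirq, process), so the lock always blocks re-entry from its own context. A hypothetical writer path pairing the helpers, with `walking` left at 0:

    if (cfs_trace_lock_tcd(tcd, 0)) {
            /* append message pages to tcd->tcd_pages */
            cfs_trace_unlock_tcd(tcd, 0);
    }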
 
 int cfs_tcd_owns_tage(struct cfs_trace_cpu_data *tcd,
index b6cb94f..def2036 100644 (file)
@@ -183,19 +183,19 @@ static int libcfs_psdev_release(unsigned long flags, void *args)
         RETURN(0);
 }
 
-static cfs_rw_semaphore_t ioctl_list_sem;
+static struct rw_semaphore ioctl_list_sem;
 static cfs_list_t ioctl_list;
 
 int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand)
 {
         int rc = 0;
 
-        cfs_down_write(&ioctl_list_sem);
+       down_write(&ioctl_list_sem);
         if (!cfs_list_empty(&hand->item))
                 rc = -EBUSY;
         else
                 cfs_list_add_tail(&hand->item, &ioctl_list);
-        cfs_up_write(&ioctl_list_sem);
+       up_write(&ioctl_list_sem);
 
         return rc;
 }
@@ -205,12 +205,12 @@ int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand)
 {
         int rc = 0;
 
-        cfs_down_write(&ioctl_list_sem);
+       down_write(&ioctl_list_sem);
         if (cfs_list_empty(&hand->item))
                 rc = -ENOENT;
         else
                 cfs_list_del_init(&hand->item);
-        cfs_up_write(&ioctl_list_sem);
+       up_write(&ioctl_list_sem);
 
         return rc;
 }
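
A hedged sketch of handler registration against these two exports; the `handle_ioctl` signature and the list-node initializer macro are assumptions inferred from the call site below. Note `item` must start out as an empty (self-pointing) node or libcfs_register_ioctl() returns -EBUSY:

    static int my_handle_ioctl(unsigned int cmd, struct libcfs_ioctl_data *data)
    {
            return -EINVAL;   /* not ours; the walker tries the next handler */
    }

    static struct libcfs_ioctl_handler my_hand = {
            .item         = CFS_LIST_HEAD_INIT(my_hand.item), /* assumed macro */
            .handle_ioctl = my_handle_ioctl,
    };

    /* rc = libcfs_register_ioctl(&my_hand);
     * ... on cleanup: libcfs_deregister_ioctl(&my_hand); */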
@@ -305,7 +305,7 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile,unsigned long cmd,
         default: {
                 struct libcfs_ioctl_handler *hand;
                 err = -EINVAL;
-                cfs_down_read(&ioctl_list_sem);
+               down_read(&ioctl_list_sem);
                 cfs_list_for_each_entry_typed(hand, &ioctl_list,
                         struct libcfs_ioctl_handler, item) {
                         err = hand->handle_ioctl(cmd, data);
@@ -316,7 +316,7 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile,unsigned long cmd,
                                 break;
                         }
                 }
-                cfs_up_read(&ioctl_list_sem);
+               up_read(&ioctl_list_sem);
                 break;
         }
         }
@@ -365,8 +365,8 @@ MODULE_DESCRIPTION("Portals v3.1");
 MODULE_LICENSE("GPL");
 
 extern cfs_psdev_t libcfs_dev;
-extern cfs_rw_semaphore_t cfs_tracefile_sem;
-extern cfs_mutex_t cfs_trace_thread_mutex;
+extern struct rw_semaphore cfs_tracefile_sem;
+extern struct mutex cfs_trace_thread_mutex;
 extern struct cfs_wi_sched *cfs_sched_rehash;
 
 extern void libcfs_init_nidstrings(void);
@@ -379,9 +379,9 @@ static int init_libcfs_module(void)
 
        libcfs_arch_init();
        libcfs_init_nidstrings();
-       cfs_init_rwsem(&cfs_tracefile_sem);
-       cfs_mutex_init(&cfs_trace_thread_mutex);
-       cfs_init_rwsem(&ioctl_list_sem);
+       init_rwsem(&cfs_tracefile_sem);
+       mutex_init(&cfs_trace_thread_mutex);
+       init_rwsem(&ioctl_list_sem);
        CFS_INIT_LIST_HEAD(&ioctl_list);
        cfs_waitq_init(&cfs_race_waitq);
 
@@ -488,8 +488,8 @@ static void exit_libcfs_module(void)
                printk(CFS_KERN_ERR "LustreError: libcfs_debug_cleanup: %d\n",
                       rc);
 
-       cfs_fini_rwsem(&ioctl_list_sem);
-       cfs_fini_rwsem(&cfs_tracefile_sem);
+       fini_rwsem(&ioctl_list_sem);
+       fini_rwsem(&cfs_tracefile_sem);
 
        libcfs_arch_cleanup();
 }
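
Since the module now calls the kernel API directly, a lock with static scope such as ioctl_list_sem could in principle drop the runtime call in favour of a static initializer; a sketch of that alternative, which is not what the patch does (cfs_tracefile_sem is shared across files, so it keeps init_rwsem() at module load):

    static DECLARE_RWSEM(ioctl_list_sem);  /* would replace init_rwsem(&ioctl_list_sem) */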
index 4bcb858..5b2c478 100644 (file)
@@ -64,15 +64,15 @@ static char      libcfs_nidstrings[LNET_NIDSTR_COUNT][LNET_NIDSTR_SIZE];
 static int       libcfs_nidstring_idx = 0;
 
 #ifdef __KERNEL__
-static cfs_spinlock_t libcfs_nidstring_lock;
+static spinlock_t libcfs_nidstring_lock;
 
 void libcfs_init_nidstrings (void)
 {
-        cfs_spin_lock_init(&libcfs_nidstring_lock);
+       spin_lock_init(&libcfs_nidstring_lock);
 }
 
-# define NIDSTR_LOCK(f)   cfs_spin_lock_irqsave(&libcfs_nidstring_lock, f)
-# define NIDSTR_UNLOCK(f) cfs_spin_unlock_irqrestore(&libcfs_nidstring_lock, f)
+# define NIDSTR_LOCK(f)   spin_lock_irqsave(&libcfs_nidstring_lock, f)
+# define NIDSTR_UNLOCK(f) spin_unlock_irqrestore(&libcfs_nidstring_lock, f)
 #else
 # define NIDSTR_LOCK(f)   (f=sizeof(f))  /* avoid set-but-unused warnings */
 # define NIDSTR_UNLOCK(f) (f=sizeof(f))
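
Presumably the lock serializes claiming slots from the NID string ring; a hypothetical reconstruction of the consumer, using only the macros and globals above:

    unsigned long flags;
    char *str;

    NIDSTR_LOCK(flags);
    str = libcfs_nidstrings[libcfs_nidstring_idx++];
    if (libcfs_nidstring_idx == LNET_NIDSTR_COUNT)
            libcfs_nidstring_idx = 0;
    NIDSTR_UNLOCK(flags);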
index 115a70a..cdc218e 100644 (file)
@@ -50,7 +50,7 @@ union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[CFS_NR_CPUS] __cache
 char cfs_tracefile[TRACEFILE_NAME_SIZE];
 long long cfs_tracefile_size = CFS_TRACEFILE_SIZE;
 static struct tracefiled_ctl trace_tctl;
-cfs_mutex_t cfs_trace_thread_mutex;
+struct mutex cfs_trace_thread_mutex;
 static int thread_running = 0;
 
 cfs_atomic_t cfs_tage_allocated = CFS_ATOMIC_INIT(0);
@@ -198,7 +198,7 @@ static void cfs_tcd_shrink(struct cfs_trace_cpu_data *tcd)
                        pgcount + 1, tcd->tcd_cur_pages);
 
         CFS_INIT_LIST_HEAD(&pc.pc_pages);
-        cfs_spin_lock_init(&pc.pc_lock);
+       spin_lock_init(&pc.pc_lock);
 
         cfs_list_for_each_entry_safe_typed(tage, tmp, &tcd->tcd_pages,
                                            struct cfs_trace_page, linkage) {
@@ -532,10 +532,10 @@ panic_collect_pages(struct page_collection *pc)
 
 static void collect_pages_on_all_cpus(struct page_collection *pc)
 {
-        struct cfs_trace_cpu_data *tcd;
-        int i, cpu;
+       struct cfs_trace_cpu_data *tcd;
+       int i, cpu;
 
-        cfs_spin_lock(&pc->pc_lock);
+       spin_lock(&pc->pc_lock);
         cfs_for_each_possible_cpu(cpu) {
                 cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                         cfs_list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
@@ -547,7 +547,7 @@ static void collect_pages_on_all_cpus(struct page_collection *pc)
                         }
                 }
         }
-        cfs_spin_unlock(&pc->pc_lock);
+       spin_unlock(&pc->pc_lock);
 }
 
 static void collect_pages(struct page_collection *pc)
@@ -568,7 +568,7 @@ static void put_pages_back_on_all_cpus(struct page_collection *pc)
         struct cfs_trace_page *tmp;
         int i, cpu;
 
-        cfs_spin_lock(&pc->pc_lock);
+       spin_lock(&pc->pc_lock);
         cfs_for_each_possible_cpu(cpu) {
                 cfs_tcd_for_each_type_lock(tcd, i, cpu) {
                         cur_head = tcd->tcd_pages.next;
@@ -588,7 +588,7 @@ static void put_pages_back_on_all_cpus(struct page_collection *pc)
                         }
                 }
         }
-        cfs_spin_unlock(&pc->pc_lock);
+       spin_unlock(&pc->pc_lock);
 }
 
 static void put_pages_back(struct page_collection *pc)
@@ -602,12 +602,12 @@ static void put_pages_back(struct page_collection *pc)
  * if we have been steadily writing (and otherwise discarding) pages via the
  * debug daemon. */
 static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
-                                         struct cfs_trace_cpu_data *tcd)
+                                        struct cfs_trace_cpu_data *tcd)
 {
-        struct cfs_trace_page *tage;
-        struct cfs_trace_page *tmp;
+       struct cfs_trace_page *tage;
+       struct cfs_trace_page *tmp;
 
-        cfs_spin_lock(&pc->pc_lock);
+       spin_lock(&pc->pc_lock);
         cfs_list_for_each_entry_safe_typed(tage, tmp, &pc->pc_pages,
                                            struct cfs_trace_page, linkage) {
 
@@ -632,7 +632,7 @@ static void put_pages_on_tcd_daemon_list(struct page_collection *pc,
                         tcd->tcd_cur_daemon_pages--;
                 }
         }
-        cfs_spin_unlock(&pc->pc_lock);
+       spin_unlock(&pc->pc_lock);
 }
 
 static void put_pages_on_daemon_list(struct page_collection *pc)
@@ -648,11 +648,11 @@ static void put_pages_on_daemon_list(struct page_collection *pc)
 
 void cfs_trace_debug_print(void)
 {
-        struct page_collection pc;
-        struct cfs_trace_page *tage;
-        struct cfs_trace_page *tmp;
+       struct page_collection pc;
+       struct cfs_trace_page *tage;
+       struct cfs_trace_page *tmp;
 
-        cfs_spin_lock_init(&pc.pc_lock);
+       spin_lock_init(&pc.pc_lock);
 
         pc.pc_want_daemon_pages = 1;
         collect_pages(&pc);
@@ -708,7 +708,7 @@ int cfs_tracefile_dump_all_pages(char *filename)
                 goto out;
         }
 
-        cfs_spin_lock_init(&pc.pc_lock);
+       spin_lock_init(&pc.pc_lock);
         pc.pc_want_daemon_pages = 1;
         collect_pages(&pc);
         if (cfs_list_empty(&pc.pc_pages)) {
@@ -749,11 +749,11 @@ int cfs_tracefile_dump_all_pages(char *filename)
 
 void cfs_trace_flush_pages(void)
 {
-        struct page_collection pc;
-        struct cfs_trace_page *tage;
-        struct cfs_trace_page *tmp;
+       struct page_collection pc;
+       struct cfs_trace_page *tage;
+       struct cfs_trace_page *tmp;
 
-        cfs_spin_lock_init(&pc.pc_lock);
+       spin_lock_init(&pc.pc_lock);
 
         pc.pc_want_daemon_pages = 1;
         collect_pages(&pc);
@@ -1001,8 +1001,8 @@ static int tracefiled(void *arg)
         /* this is so broken in uml?  what on earth is going on? */
         cfs_daemonize("ktracefiled");
 
-        cfs_spin_lock_init(&pc.pc_lock);
-        cfs_complete(&tctl->tctl_start);
+       spin_lock_init(&pc.pc_lock);
+       complete(&tctl->tctl_start);
 
         while (1) {
                 cfs_waitlink_t __wait;
@@ -1095,7 +1095,7 @@ end_loop:
                                     cfs_time_seconds(1));
                 cfs_waitq_del(&tctl->tctl_waitq, &__wait);
         }
-        cfs_complete(&tctl->tctl_stop);
+       complete(&tctl->tctl_stop);
         return 0;
 }
 
@@ -1104,12 +1104,12 @@ int cfs_trace_start_thread(void)
         struct tracefiled_ctl *tctl = &trace_tctl;
         int rc = 0;
 
-        cfs_mutex_lock(&cfs_trace_thread_mutex);
+       mutex_lock(&cfs_trace_thread_mutex);
         if (thread_running)
                 goto out;
 
-        cfs_init_completion(&tctl->tctl_start);
-        cfs_init_completion(&tctl->tctl_stop);
+       init_completion(&tctl->tctl_start);
+       init_completion(&tctl->tctl_stop);
         cfs_waitq_init(&tctl->tctl_waitq);
         cfs_atomic_set(&tctl->tctl_shutdown, 0);
 
@@ -1118,10 +1118,10 @@ int cfs_trace_start_thread(void)
                 goto out;
         }
 
-        cfs_wait_for_completion(&tctl->tctl_start);
+       wait_for_completion(&tctl->tctl_start);
         thread_running = 1;
 out:
-        cfs_mutex_unlock(&cfs_trace_thread_mutex);
+       mutex_unlock(&cfs_trace_thread_mutex);
         return rc;
 }
 
@@ -1129,15 +1129,15 @@ void cfs_trace_stop_thread(void)
 {
         struct tracefiled_ctl *tctl = &trace_tctl;
 
-        cfs_mutex_lock(&cfs_trace_thread_mutex);
+       mutex_lock(&cfs_trace_thread_mutex);
         if (thread_running) {
                 printk(CFS_KERN_INFO
                        "Lustre: shutting down debug daemon thread...\n");
                 cfs_atomic_set(&tctl->tctl_shutdown, 1);
-                cfs_wait_for_completion(&tctl->tctl_stop);
+               wait_for_completion(&tctl->tctl_stop);
                 thread_running = 0;
         }
-        cfs_mutex_unlock(&cfs_trace_thread_mutex);
+       mutex_unlock(&cfs_trace_thread_mutex);
 }
 
 int cfs_tracefile_init(int max_pages)
@@ -1197,14 +1197,14 @@ static void trace_cleanup_on_all_cpus(void)
 
 static void cfs_trace_cleanup(void)
 {
-        struct page_collection pc;
+       struct page_collection pc;
 
-        CFS_INIT_LIST_HEAD(&pc.pc_pages);
-        cfs_spin_lock_init(&pc.pc_lock);
+       CFS_INIT_LIST_HEAD(&pc.pc_pages);
+       spin_lock_init(&pc.pc_lock);
 
-        trace_cleanup_on_all_cpus();
+       trace_cleanup_on_all_cpus();
 
-        cfs_tracefile_fini_arch();
+       cfs_tracefile_fini_arch();
 }
 
 void cfs_tracefile_exit(void)
index 5e7e9b1..b8124e2 100644 (file)
@@ -117,7 +117,7 @@ union cfs_trace_data_union {
                 * and trace_put_tcd, which are called in libcfs_debug_vmsg2 and
                 * tcd_for_each_type_lock
                 */
-               cfs_spinlock_t          tcd_lock;
+               spinlock_t              tcd_lock;
                unsigned long           tcd_lock_flags;
 
                /*
@@ -201,7 +201,7 @@ extern union cfs_trace_data_union (*cfs_trace_data[TCD_MAX_TYPES])[CFS_NR_CPUS];
 /* XXX nikita: this declaration is internal to tracefile.c and should probably
  * be moved there */
 struct page_collection {
-       cfs_list_t              pc_pages;
+       cfs_list_t      pc_pages;
        /*
         * spin-lock protecting ->pc_pages. It is taken by smp_call_function()
         * call-back functions. XXX nikita: Which is horrible: all processors
@@ -209,23 +209,23 @@ struct page_collection {
         * lock. Probably ->pc_pages should be replaced with an array of
         * NR_CPUS elements accessed locklessly.
         */
-       cfs_spinlock_t          pc_lock;
+       spinlock_t      pc_lock;
        /*
         * if this flag is set, collect_pages() will spill both
         * ->tcd_daemon_pages and ->tcd_pages to the ->pc_pages. Otherwise,
         * only ->tcd_pages are spilled.
         */
-       int                     pc_want_daemon_pages;
+       int             pc_want_daemon_pages;
 };
 
 /* XXX nikita: this declaration is internal to tracefile.c and should probably
  * be moved there */
 struct tracefiled_ctl {
-       cfs_completion_t       tctl_start;
-       cfs_completion_t       tctl_stop;
-       cfs_waitq_t            tctl_waitq;
-       pid_t                  tctl_pid;
-       cfs_atomic_t           tctl_shutdown;
+       struct completion       tctl_start;
+       struct completion       tctl_stop;
+       cfs_waitq_t             tctl_waitq;
+       pid_t                   tctl_pid;
+       cfs_atomic_t            tctl_shutdown;
 };
 
 /*
index 83f8341..83b0185 100644 (file)
@@ -157,7 +157,7 @@ struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
         head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
 find_again:
         found = 0;
-        cfs_spin_lock(&cache->uc_lock);
+       spin_lock(&cache->uc_lock);
         cfs_list_for_each_entry_safe(entry, next, head, ue_hash) {
                 /* check invalid & expired items */
                 if (check_unlink_entry(cache, entry))
@@ -170,7 +170,7 @@ find_again:
 
         if (!found) {
                 if (!new) {
-                        cfs_spin_unlock(&cache->uc_lock);
+                       spin_unlock(&cache->uc_lock);
                         new = alloc_entry(cache, key, args);
                         if (!new) {
                                 CERROR("fail to alloc entry\n");
@@ -194,9 +194,9 @@ find_again:
         if (UC_CACHE_IS_NEW(entry)) {
                 UC_CACHE_SET_ACQUIRING(entry);
                 UC_CACHE_CLEAR_NEW(entry);
-                cfs_spin_unlock(&cache->uc_lock);
-                rc = refresh_entry(cache, entry);
-                cfs_spin_lock(&cache->uc_lock);
+               spin_unlock(&cache->uc_lock);
+               rc = refresh_entry(cache, entry);
+               spin_lock(&cache->uc_lock);
                 entry->ue_acquire_expire =
                         cfs_time_shift(cache->uc_acquire_expire);
                 if (rc < 0) {
@@ -220,12 +220,12 @@ find_again:
                 cfs_waitlink_init(&wait);
                 cfs_waitq_add(&entry->ue_waitq, &wait);
                 cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-                cfs_spin_unlock(&cache->uc_lock);
+               spin_unlock(&cache->uc_lock);
 
-                left = cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
-                                           expiry);
+               left = cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
+                                          expiry);
 
-                cfs_spin_lock(&cache->uc_lock);
+               spin_lock(&cache->uc_lock);
                 cfs_waitq_del(&entry->ue_waitq, &wait);
                 if (UC_CACHE_IS_ACQUIRING(entry)) {
                         /* we're interrupted or upcall failed in the middle */
@@ -253,36 +253,36 @@ find_again:
                  * without any error, should at least give a
                  * chance to use it once.
                  */
-                if (entry != new) {
-                        put_entry(cache, entry);
-                        cfs_spin_unlock(&cache->uc_lock);
-                        new = NULL;
-                        goto find_again;
-                }
-        }
+               if (entry != new) {
+                       put_entry(cache, entry);
+                       spin_unlock(&cache->uc_lock);
+                       new = NULL;
+                       goto find_again;
+               }
+       }
 
         /* Now we know it's good */
 out:
-        cfs_spin_unlock(&cache->uc_lock);
-        RETURN(entry);
+       spin_unlock(&cache->uc_lock);
+       RETURN(entry);
 }
 EXPORT_SYMBOL(upcall_cache_get_entry);
 
 void upcall_cache_put_entry(struct upcall_cache *cache,
                             struct upcall_cache_entry *entry)
 {
-        ENTRY;
-
-        if (!entry) {
-                EXIT;
-                return;
-        }
-
-        LASSERT(cfs_atomic_read(&entry->ue_refcount) > 0);
-        cfs_spin_lock(&cache->uc_lock);
-        put_entry(cache, entry);
-        cfs_spin_unlock(&cache->uc_lock);
-        EXIT;
+       ENTRY;
+
+       if (!entry) {
+               EXIT;
+               return;
+       }
+
+       LASSERT(cfs_atomic_read(&entry->ue_refcount) > 0);
+       spin_lock(&cache->uc_lock);
+       put_entry(cache, entry);
+       spin_unlock(&cache->uc_lock);
+       EXIT;
 }
 EXPORT_SYMBOL(upcall_cache_put_entry);
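
A hypothetical caller pairing the two exports; the exact error-return convention is an assumption:

    entry = upcall_cache_get_entry(cache, key, args);
    if (IS_ERR_OR_NULL(entry))
            return -ENOENT;                 /* convention assumed */
    /* ... read the refreshed entry ... */
    upcall_cache_put_entry(cache, entry);   /* drops ue_refcount */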
 
@@ -298,7 +298,7 @@ int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
 
         head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
 
-        cfs_spin_lock(&cache->uc_lock);
+       spin_lock(&cache->uc_lock);
         cfs_list_for_each_entry(entry, head, ue_hash) {
                 if (downcall_compare(cache, entry, key, args) == 0) {
                         found = 1;
@@ -311,7 +311,7 @@ int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
                 CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n",
                        cache->uc_name, key);
                 /* haven't found, it's possible */
-                cfs_spin_unlock(&cache->uc_lock);
+               spin_unlock(&cache->uc_lock);
                 RETURN(-EINVAL);
         }
 
@@ -333,10 +333,10 @@ int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
                 GOTO(out, rc = -EINVAL);
         }
 
-        cfs_spin_unlock(&cache->uc_lock);
-        if (cache->uc_ops->parse_downcall)
-                rc = cache->uc_ops->parse_downcall(cache, entry, args);
-        cfs_spin_lock(&cache->uc_lock);
+       spin_unlock(&cache->uc_lock);
+       if (cache->uc_ops->parse_downcall)
+               rc = cache->uc_ops->parse_downcall(cache, entry, args);
+       spin_lock(&cache->uc_lock);
         if (rc)
                 GOTO(out, rc);
 
@@ -350,21 +350,21 @@ out:
                 cfs_list_del_init(&entry->ue_hash);
         }
         UC_CACHE_CLEAR_ACQUIRING(entry);
-        cfs_spin_unlock(&cache->uc_lock);
-        cfs_waitq_broadcast(&entry->ue_waitq);
-        put_entry(cache, entry);
+       spin_unlock(&cache->uc_lock);
+       cfs_waitq_broadcast(&entry->ue_waitq);
+       put_entry(cache, entry);
 
-        RETURN(rc);
+       RETURN(rc);
 }
 EXPORT_SYMBOL(upcall_cache_downcall);
 
 static void cache_flush(struct upcall_cache *cache, int force)
 {
-        struct upcall_cache_entry *entry, *next;
-        int i;
-        ENTRY;
+       struct upcall_cache_entry *entry, *next;
+       int i;
+       ENTRY;
 
-        cfs_spin_lock(&cache->uc_lock);
+       spin_lock(&cache->uc_lock);
         for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
                 cfs_list_for_each_entry_safe(entry, next,
                                          &cache->uc_hashtable[i], ue_hash) {
@@ -376,8 +376,8 @@ static void cache_flush(struct upcall_cache *cache, int force)
                         free_entry(cache, entry);
                 }
         }
-        cfs_spin_unlock(&cache->uc_lock);
-        EXIT;
+       spin_unlock(&cache->uc_lock);
+       EXIT;
 }
 
 void upcall_cache_flush_idle(struct upcall_cache *cache)
@@ -401,7 +401,7 @@ void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
 
         head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
 
-        cfs_spin_lock(&cache->uc_lock);
+       spin_lock(&cache->uc_lock);
         cfs_list_for_each_entry(entry, head, ue_hash) {
                 if (upcall_compare(cache, entry, key, args) == 0) {
                         found = 1;
@@ -420,7 +420,7 @@ void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
                 if (!cfs_atomic_read(&entry->ue_refcount))
                         free_entry(cache, entry);
         }
-        cfs_spin_unlock(&cache->uc_lock);
+       spin_unlock(&cache->uc_lock);
 }
 EXPORT_SYMBOL(upcall_cache_flush_one);
 
@@ -435,8 +435,8 @@ struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
         if (!cache)
                 RETURN(ERR_PTR(-ENOMEM));
 
-        cfs_spin_lock_init(&cache->uc_lock);
-        cfs_rwlock_init(&cache->uc_upcall_rwlock);
+       spin_lock_init(&cache->uc_lock);
+       rwlock_init(&cache->uc_upcall_rwlock);
         for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
                 CFS_INIT_LIST_HEAD(&cache->uc_hashtable[i]);
         strncpy(cache->uc_name, name, sizeof(cache->uc_name) - 1);
index 3ac285c..e5031f6 100644 (file)
@@ -38,7 +38,7 @@
 
 #define OFF_BY_START(start)     ((start)/BITS_PER_LONG)
 
-unsigned long cfs_find_next_bit(unsigned long *addr,
+unsigned long find_next_bit(unsigned long *addr,
                                 unsigned long size, unsigned long offset)
 {
         unsigned long *word, *last;
@@ -72,7 +72,7 @@ found:
         return base + bit;
 }
 
-unsigned long cfs_find_next_zero_bit(unsigned long *addr,
+unsigned long find_next_zero_bit(unsigned long *addr,
                                      unsigned long size, unsigned long offset)
 {
         unsigned long *word, *last;
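
With the cfs_ prefix gone these carry the kernel contract: return the index of the first set (respectively zero) bit at or above `offset`, or `size` when none is found. A worked example, assuming 64-bit longs:

    unsigned long map[2] = { 0x0UL, 0x10UL };   /* only bit 68 is set */

    find_next_bit(map, 128, 0);        /* -> 68 */
    find_next_bit(map, 128, 69);       /* -> 128, nothing above bit 68 */
    find_next_zero_bit(map, 128, 68);  /* -> 69, bit 68 itself is set */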
index df9b2fc..39ac628 100644 (file)
  * No-op implementation.
  */
 
-void cfs_spin_lock_init(cfs_spinlock_t *lock)
+void spin_lock_init(spinlock_t *lock)
 {
-        LASSERT(lock != NULL);
-        (void)lock;
+       LASSERT(lock != NULL);
+       (void)lock;
 }
 
-void cfs_spin_lock(cfs_spinlock_t *lock)
+void spin_lock(spinlock_t *lock)
 {
-        (void)lock;
+       (void)lock;
 }
 
-void cfs_spin_unlock(cfs_spinlock_t *lock)
+void spin_unlock(spinlock_t *lock)
 {
-        (void)lock;
+       (void)lock;
 }
 
-int cfs_spin_trylock(cfs_spinlock_t *lock)
+int spin_trylock(spinlock_t *lock)
 {
-        (void)lock;
+       (void)lock;
        return 1;
 }
 
-void cfs_spin_lock_bh_init(cfs_spinlock_t *lock)
+void spin_lock_bh_init(spinlock_t *lock)
 {
-        LASSERT(lock != NULL);
-        (void)lock;
+       LASSERT(lock != NULL);
+       (void)lock;
 }
 
-void cfs_spin_lock_bh(cfs_spinlock_t *lock)
+void spin_lock_bh(spinlock_t *lock)
 {
-        LASSERT(lock != NULL);
-        (void)lock;
+       LASSERT(lock != NULL);
+       (void)lock;
 }
 
-void cfs_spin_unlock_bh(cfs_spinlock_t *lock)
+void spin_unlock_bh(spinlock_t *lock)
 {
-        LASSERT(lock != NULL);
-        (void)lock;
+       LASSERT(lock != NULL);
+       (void)lock;
 }
 
 /*
@@ -117,30 +117,30 @@ void cfs_spin_unlock_bh(cfs_spinlock_t *lock)
  * - __up(x)
  */
 
-void cfs_sema_init(cfs_semaphore_t *s, int val)
+void sema_init(struct semaphore *s, int val)
 {
-        LASSERT(s != NULL);
-        (void)s;
-        (void)val;
+       LASSERT(s != NULL);
+       (void)s;
+       (void)val;
 }
 
-void __down(cfs_semaphore_t *s)
+void __down(struct semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
+       LASSERT(s != NULL);
+       (void)s;
 }
 
-int __down_interruptible(cfs_semaphore_t *s)
+int __down_interruptible(struct semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
-        return 0;
+       LASSERT(s != NULL);
+       (void)s;
+       return 0;
 }
 
-void __up(cfs_semaphore_t *s)
+void __up(struct semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
+       LASSERT(s != NULL);
+       (void)s;
 }
 
 
@@ -152,51 +152,51 @@ void __up(cfs_semaphore_t *s)
  * - wait_for_completion(c)
  */
 
-static cfs_wait_handler_t wait_handler;
+static wait_handler_t wait_handler;
 
-void cfs_init_completion_module(cfs_wait_handler_t handler)
+void init_completion_module(wait_handler_t handler)
 {
-        wait_handler = handler;
+       wait_handler = handler;
 }
 
-int cfs_call_wait_handler(int timeout)
+int call_wait_handler(int timeout)
 {
-        if (!wait_handler)
-                return -ENOSYS;
-        return wait_handler(timeout);
+       if (!wait_handler)
+               return -ENOSYS;
+       return wait_handler(timeout);
 }
 
-void cfs_init_completion(cfs_completion_t *c)
+void init_completion(struct completion *c)
 {
-        LASSERT(c != NULL);
-        c->done = 0;
-        cfs_waitq_init(&c->wait);
+       LASSERT(c != NULL);
+       c->done = 0;
+       cfs_waitq_init(&c->wait);
 }
 
-void cfs_complete(cfs_completion_t *c)
+void complete(struct completion *c)
 {
-        LASSERT(c != NULL);
-        c->done  = 1;
-        cfs_waitq_signal(&c->wait);
+       LASSERT(c != NULL);
+       c->done  = 1;
+       cfs_waitq_signal(&c->wait);
 }
 
-void cfs_wait_for_completion(cfs_completion_t *c)
+void wait_for_completion(struct completion *c)
 {
-        LASSERT(c != NULL);
-        do {
-                if (cfs_call_wait_handler(1000) < 0)
-                        break;
-        } while (c->done == 0);
+       LASSERT(c != NULL);
+       do {
+               if (call_wait_handler(1000) < 0)
+                       break;
+       } while (c->done == 0);
 }
 
-int cfs_wait_for_completion_interruptible(cfs_completion_t *c)
+int wait_for_completion_interruptible(struct completion *c)
 {
-        LASSERT(c != NULL);
-        do {
-                if (cfs_call_wait_handler(1000) < 0)
-                        break;
-        } while (c->done == 0);
-        return 0;
+       LASSERT(c != NULL);
+       do {
+               if (call_wait_handler(1000) < 0)
+                       break;
+       } while (c->done == 0);
+       return 0;
 }
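
In this single-threaded userspace build a completion cannot sleep; waiting means pumping the registered wait handler until `done` flips. A hypothetical caller:

    struct completion c;

    init_completion(&c);
    /* ... start an async operation whose callback runs complete(&c) ... */
    wait_for_completion(&c);   /* polls call_wait_handler(1000) until c.done */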
 
 /*
@@ -210,54 +210,54 @@ int cfs_wait_for_completion_interruptible(cfs_completion_t *c)
  * - up_write(x)
  */
 
-void cfs_init_rwsem(cfs_rw_semaphore_t *s)
+void init_rwsem(struct rw_semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
+       LASSERT(s != NULL);
+       (void)s;
 }
 
-void cfs_down_read(cfs_rw_semaphore_t *s)
+void down_read(struct rw_semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
+       LASSERT(s != NULL);
+       (void)s;
 }
 
-int cfs_down_read_trylock(cfs_rw_semaphore_t *s)
+int down_read_trylock(struct rw_semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
+       LASSERT(s != NULL);
+       (void)s;
        return 1;
 }
 
-void cfs_down_write(cfs_rw_semaphore_t *s)
+void down_write(struct rw_semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
+       LASSERT(s != NULL);
+       (void)s;
 }
 
-int cfs_down_write_trylock(cfs_rw_semaphore_t *s)
+int down_write_trylock(struct rw_semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
+       LASSERT(s != NULL);
+       (void)s;
        return 1;
 }
 
-void cfs_up_read(cfs_rw_semaphore_t *s)
+void up_read(struct rw_semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
+       LASSERT(s != NULL);
+       (void)s;
 }
 
-void cfs_up_write(cfs_rw_semaphore_t *s)
+void up_write(struct rw_semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
+       LASSERT(s != NULL);
+       (void)s;
 }
 
-void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
+void fini_rwsem(struct rw_semaphore *s)
 {
-        LASSERT(s != NULL);
-        (void)s;
+       LASSERT(s != NULL);
+       (void)s;
 }
 
 #ifdef HAVE_LIBPTHREAD
@@ -266,7 +266,7 @@ void cfs_fini_rwsem(cfs_rw_semaphore_t *s)
  * Multi-threaded user space completion
  */
 
-void cfs_mt_init_completion(cfs_mt_completion_t *c)
+void mt_init_completion(mt_completion_t *c)
 {
         LASSERT(c != NULL);
         c->c_done = 0;
@@ -274,14 +274,14 @@ void cfs_mt_init_completion(cfs_mt_completion_t *c)
         pthread_cond_init(&c->c_cond, NULL);
 }
 
-void cfs_mt_fini_completion(cfs_mt_completion_t *c)
+void mt_fini_completion(mt_completion_t *c)
 {
         LASSERT(c != NULL);
         pthread_mutex_destroy(&c->c_mut);
         pthread_cond_destroy(&c->c_cond);
 }
 
-void cfs_mt_complete(cfs_mt_completion_t *c)
+void mt_complete(mt_completion_t *c)
 {
         LASSERT(c != NULL);
         pthread_mutex_lock(&c->c_mut);
@@ -290,7 +290,7 @@ void cfs_mt_complete(cfs_mt_completion_t *c)
         pthread_mutex_unlock(&c->c_mut);
 }
 
-void cfs_mt_wait_for_completion(cfs_mt_completion_t *c)
+void mt_wait_for_completion(mt_completion_t *c)
 {
         LASSERT(c != NULL);
         pthread_mutex_lock(&c->c_mut);
@@ -306,7 +306,7 @@ void cfs_mt_wait_for_completion(cfs_mt_completion_t *c)
 
 static pthread_mutex_t atomic_guard_lock = PTHREAD_MUTEX_INITIALIZER;
 
-int cfs_mt_atomic_read(cfs_mt_atomic_t *a)
+int mt_atomic_read(mt_atomic_t *a)
 {
         int r;
 
@@ -316,14 +316,14 @@ int cfs_mt_atomic_read(cfs_mt_atomic_t *a)
         return r;
 }
 
-void cfs_mt_atomic_set(cfs_mt_atomic_t *a, int b)
+void mt_atomic_set(mt_atomic_t *a, int b)
 {
         pthread_mutex_lock(&atomic_guard_lock);
         a->counter = b;
         pthread_mutex_unlock(&atomic_guard_lock);
 }
 
-int cfs_mt_atomic_dec_and_test(cfs_mt_atomic_t *a)
+int mt_atomic_dec_and_test(mt_atomic_t *a)
 {
         int r;
 
@@ -333,20 +333,20 @@ int cfs_mt_atomic_dec_and_test(cfs_mt_atomic_t *a)
         return (r == 0);
 }
 
-void cfs_mt_atomic_inc(cfs_mt_atomic_t *a)
+void mt_atomic_inc(mt_atomic_t *a)
 {
         pthread_mutex_lock(&atomic_guard_lock);
         ++a->counter;
         pthread_mutex_unlock(&atomic_guard_lock);
 }
 
-void cfs_mt_atomic_dec(cfs_mt_atomic_t *a)
+void mt_atomic_dec(mt_atomic_t *a)
 {
         pthread_mutex_lock(&atomic_guard_lock);
         --a->counter;
         pthread_mutex_unlock(&atomic_guard_lock);
 }
-void cfs_mt_atomic_add(int b, cfs_mt_atomic_t *a)
+void mt_atomic_add(int b, mt_atomic_t *a)
 
 {
         pthread_mutex_lock(&atomic_guard_lock);
@@ -354,7 +354,7 @@ void cfs_mt_atomic_add(int b, cfs_mt_atomic_t *a)
         pthread_mutex_unlock(&atomic_guard_lock);
 }
 
-void cfs_mt_atomic_sub(int b, cfs_mt_atomic_t *a)
+void mt_atomic_sub(int b, mt_atomic_t *a)
 {
         pthread_mutex_lock(&atomic_guard_lock);
         a->counter -= b;
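
Every mt_atomic operation serializes on the single `atomic_guard_lock`, so the counters are correct but deliberately unscalable. A small usage sketch:

    mt_atomic_t refs;

    mt_atomic_set(&refs, 1);
    mt_atomic_inc(&refs);              /* counter == 2 */
    if (mt_atomic_dec_and_test(&refs)) /* 2 -> 1, false */
            ;                          /* not reached */
    if (mt_atomic_dec_and_test(&refs)) /* 1 -> 0, true */
            ;                          /* last reference: free the object here */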
index 0cc1e3d..2c03cb7 100644 (file)
@@ -125,7 +125,7 @@ void cfs_waitq_wait(struct cfs_waitlink *link, cfs_task_state_t state)
         (void)link;
 
         /* well, wait for something to happen */
-        cfs_call_wait_handler(0);
+       call_wait_handler(0);
 }
 
 int64_t cfs_waitq_timedwait(struct cfs_waitlink *link, cfs_task_state_t state,
@@ -133,7 +133,7 @@ int64_t cfs_waitq_timedwait(struct cfs_waitlink *link, cfs_task_state_t state,
 {
         LASSERT(link != NULL);
         (void)link;
-        cfs_call_wait_handler(timeout);
+       call_wait_handler(timeout);
         return 0;
 }
 
index a394794..eb1125d 100644 (file)
@@ -42,7 +42,7 @@
 #include "tracefile.h"
 
 struct lc_watchdog {
-        cfs_spinlock_t  lcw_lock;     /* check or change lcw_list */
+       spinlock_t      lcw_lock;     /* check or change lcw_list */
         int             lcw_refcount; /* must hold lcw_pending_timers_lock */
         cfs_timer_t     lcw_timer;    /* kernel timer */
         cfs_list_t      lcw_list;     /* chain on pending list */
@@ -66,8 +66,8 @@ struct lc_watchdog {
  * and lcw_stop_completion when it exits.
  * Wake lcw_event_waitq to signal timer callback dispatches.
  */
-static cfs_completion_t lcw_start_completion;
-static cfs_completion_t  lcw_stop_completion;
+static struct completion lcw_start_completion;
+static struct completion lcw_stop_completion;
 static cfs_waitq_t lcw_event_waitq;
 
 /*
@@ -84,7 +84,7 @@ static unsigned long lcw_flags = 0;
  * When it hits 0, we stop the dispatcher.
  */
 static __u32         lcw_refcount = 0;
-static CFS_DEFINE_MUTEX(lcw_refcount_mutex);
+static DEFINE_MUTEX(lcw_refcount_mutex);
 
 /*
  * List of timers that have fired that need their callbacks run by the
@@ -103,7 +103,7 @@ lcw_dump(struct lc_watchdog *lcw)
 {
         ENTRY;
 #if defined(HAVE_TASKLIST_LOCK)
-        cfs_read_lock(&tasklist_lock);
+       read_lock(&tasklist_lock);
 #else
         rcu_read_lock();
 #endif
@@ -116,7 +116,7 @@ lcw_dump(struct lc_watchdog *lcw)
         }
 
 #if defined(HAVE_TASKLIST_LOCK)
-        cfs_read_unlock(&tasklist_lock);
+       read_unlock(&tasklist_lock);
 #else
         rcu_read_unlock();
 #endif
@@ -135,30 +135,30 @@ static void lcw_cb(ulong_ptr_t data)
 
         lcw->lcw_state = LC_WATCHDOG_EXPIRED;
 
-        cfs_spin_lock_bh(&lcw->lcw_lock);
-        LASSERT(cfs_list_empty(&lcw->lcw_list));
+       spin_lock_bh(&lcw->lcw_lock);
+       LASSERT(cfs_list_empty(&lcw->lcw_list));
 
-        cfs_spin_lock_bh(&lcw_pending_timers_lock);
-        lcw->lcw_refcount++; /* +1 for pending list */
-        cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
-        cfs_waitq_signal(&lcw_event_waitq);
+       spin_lock_bh(&lcw_pending_timers_lock);
+       lcw->lcw_refcount++; /* +1 for pending list */
+       cfs_list_add(&lcw->lcw_list, &lcw_pending_timers);
+       cfs_waitq_signal(&lcw_event_waitq);
 
-        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
-        cfs_spin_unlock_bh(&lcw->lcw_lock);
-        EXIT;
+       spin_unlock_bh(&lcw_pending_timers_lock);
+       spin_unlock_bh(&lcw->lcw_lock);
+       EXIT;
 }
 
 static int is_watchdog_fired(void)
 {
-        int rc;
+       int rc;
 
-        if (cfs_test_bit(LCW_FLAG_STOP, &lcw_flags))
-                return 1;
+       if (test_bit(LCW_FLAG_STOP, &lcw_flags))
+               return 1;
 
-        cfs_spin_lock_bh(&lcw_pending_timers_lock);
-        rc = !cfs_list_empty(&lcw_pending_timers);
-        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
-        return rc;
+       spin_lock_bh(&lcw_pending_timers_lock);
+       rc = !cfs_list_empty(&lcw_pending_timers);
+       spin_unlock_bh(&lcw_pending_timers_lock);
+       return rc;
 }
 
 static void lcw_dump_stack(struct lc_watchdog *lcw)
@@ -224,7 +224,7 @@ static int lcw_dispatch_main(void *data)
         RECALC_SIGPENDING;
         SIGNAL_MASK_UNLOCK(current, flags);
 
-        cfs_complete(&lcw_start_completion);
+       complete(&lcw_start_completion);
 
         while (1) {
                 int dumplog = 1;
@@ -232,20 +232,20 @@ static int lcw_dispatch_main(void *data)
                 cfs_wait_event_interruptible(lcw_event_waitq,
                                              is_watchdog_fired(), rc);
                 CDEBUG(D_INFO, "Watchdog got woken up...\n");
-                if (cfs_test_bit(LCW_FLAG_STOP, &lcw_flags)) {
-                        CDEBUG(D_INFO, "LCW_FLAG_STOP was set, shutting down...\n");
-
-                        cfs_spin_lock_bh(&lcw_pending_timers_lock);
-                        rc = !cfs_list_empty(&lcw_pending_timers);
-                        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
-                        if (rc) {
-                                CERROR("pending timers list was not empty at "
-                                       "time of watchdog dispatch shutdown\n");
-                        }
-                        break;
-                }
-
-                cfs_spin_lock_bh(&lcw_pending_timers_lock);
+               if (test_bit(LCW_FLAG_STOP, &lcw_flags)) {
+                       CDEBUG(D_INFO, "LCW_FLAG_STOP set, shutting down...\n");
+
+                       spin_lock_bh(&lcw_pending_timers_lock);
+                       rc = !cfs_list_empty(&lcw_pending_timers);
+                       spin_unlock_bh(&lcw_pending_timers_lock);
+                       if (rc) {
+                               CERROR("pending timers list was not empty at "
+                                      "time of watchdog dispatch shutdown\n");
+                       }
+                       break;
+               }
+
+               spin_lock_bh(&lcw_pending_timers_lock);
                 while (!cfs_list_empty(&lcw_pending_timers)) {
                         int is_dumplog;
 
@@ -254,18 +254,18 @@ static int lcw_dispatch_main(void *data)
                         /* +1 ref for callback to make sure lwc wouldn't be
                          * deleted after releasing lcw_pending_timers_lock */
                         lcw->lcw_refcount++;
-                        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
-
-                        /* lock ordering */
-                        cfs_spin_lock_bh(&lcw->lcw_lock);
-                        cfs_spin_lock_bh(&lcw_pending_timers_lock);
-
-                        if (cfs_list_empty(&lcw->lcw_list)) {
-                                /* already removed from pending list */
-                                lcw->lcw_refcount--; /* -1 ref for callback */
-                                if (lcw->lcw_refcount == 0)
-                                        cfs_list_add(&lcw->lcw_list, &zombies);
-                                cfs_spin_unlock_bh(&lcw->lcw_lock);
+                       spin_unlock_bh(&lcw_pending_timers_lock);
+
+                       /* lock ordering */
+                       spin_lock_bh(&lcw->lcw_lock);
+                       spin_lock_bh(&lcw_pending_timers_lock);
+
+                       if (cfs_list_empty(&lcw->lcw_list)) {
+                               /* already removed from pending list */
+                               lcw->lcw_refcount--; /* -1 ref for callback */
+                               if (lcw->lcw_refcount == 0)
+                                       cfs_list_add(&lcw->lcw_list, &zombies);
+                               spin_unlock_bh(&lcw->lcw_lock);
                                 /* still hold lcw_pending_timers_lock */
                                 continue;
                         }
@@ -273,8 +273,8 @@ static int lcw_dispatch_main(void *data)
                         cfs_list_del_init(&lcw->lcw_list);
                         lcw->lcw_refcount--; /* -1 ref for pending list */
 
-                        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
-                        cfs_spin_unlock_bh(&lcw->lcw_lock);
+                       spin_unlock_bh(&lcw_pending_timers_lock);
+                       spin_unlock_bh(&lcw->lcw_lock);
 
                         CDEBUG(D_INFO, "found lcw for pid " LPPID "\n",
                                lcw->lcw_pid);
@@ -288,12 +288,12 @@ static int lcw_dispatch_main(void *data)
                                         dumplog = 0;
                         }
 
-                        cfs_spin_lock_bh(&lcw_pending_timers_lock);
-                        lcw->lcw_refcount--; /* -1 ref for callback */
-                        if (lcw->lcw_refcount == 0)
-                                cfs_list_add(&lcw->lcw_list, &zombies);
-                }
-                cfs_spin_unlock_bh(&lcw_pending_timers_lock);
+                       spin_lock_bh(&lcw_pending_timers_lock);
+                       lcw->lcw_refcount--; /* -1 ref for callback */
+                       if (lcw->lcw_refcount == 0)
+                               cfs_list_add(&lcw->lcw_list, &zombies);
+               }
+               spin_unlock_bh(&lcw_pending_timers_lock);
 
                 while (!cfs_list_empty(&zombies)) {
                         lcw = cfs_list_entry(lcw_pending_timers.next,
@@ -303,20 +303,20 @@ static int lcw_dispatch_main(void *data)
                 }
         }
 
-        cfs_complete(&lcw_stop_completion);
+       complete(&lcw_stop_completion);
 
-        RETURN(rc);
+       RETURN(rc);
 }
 
 static void lcw_dispatch_start(void)
 {
-        int rc;
+       int rc;
 
-        ENTRY;
-        LASSERT(lcw_refcount == 1);
+       ENTRY;
+       LASSERT(lcw_refcount == 1);
 
-        cfs_init_completion(&lcw_stop_completion);
-        cfs_init_completion(&lcw_start_completion);
+       init_completion(&lcw_stop_completion);
+       init_completion(&lcw_start_completion);
         cfs_waitq_init(&lcw_event_waitq);
 
         CDEBUG(D_INFO, "starting dispatch thread\n");
@@ -326,27 +326,27 @@ static void lcw_dispatch_start(void)
                 EXIT;
                 return;
         }
-        cfs_wait_for_completion(&lcw_start_completion);
-        CDEBUG(D_INFO, "watchdog dispatcher initialization complete.\n");
+       wait_for_completion(&lcw_start_completion);
+       CDEBUG(D_INFO, "watchdog dispatcher initialization complete.\n");
 
-        EXIT;
+       EXIT;
 }
 
 static void lcw_dispatch_stop(void)
 {
-        ENTRY;
-        LASSERT(lcw_refcount == 0);
+       ENTRY;
+       LASSERT(lcw_refcount == 0);
 
-        CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n");
+       CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n");
 
-        cfs_set_bit(LCW_FLAG_STOP, &lcw_flags);
-        cfs_waitq_signal(&lcw_event_waitq);
+       set_bit(LCW_FLAG_STOP, &lcw_flags);
+       cfs_waitq_signal(&lcw_event_waitq);
 
-        cfs_wait_for_completion(&lcw_stop_completion);
+       wait_for_completion(&lcw_stop_completion);
 
-        CDEBUG(D_INFO, "watchdog dispatcher has shut down.\n");
+       CDEBUG(D_INFO, "watchdog dispatcher has shut down.\n");
 
-        EXIT;
+       EXIT;
 }
 
 struct lc_watchdog *lc_watchdog_add(int timeout,
@@ -362,7 +362,7 @@ struct lc_watchdog *lc_watchdog_add(int timeout,
                 RETURN(ERR_PTR(-ENOMEM));
         }
 
-        cfs_spin_lock_init(&lcw->lcw_lock);
+       spin_lock_init(&lcw->lcw_lock);
         lcw->lcw_refcount = 1; /* refcount for owner */
         lcw->lcw_task     = cfs_current();
         lcw->lcw_pid      = cfs_curproc_pid();
@@ -373,10 +373,10 @@ struct lc_watchdog *lc_watchdog_add(int timeout,
         CFS_INIT_LIST_HEAD(&lcw->lcw_list);
         cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);
 
-        cfs_mutex_lock(&lcw_refcount_mutex);
-        if (++lcw_refcount == 1)
-                lcw_dispatch_start();
-        cfs_mutex_unlock(&lcw_refcount_mutex);
+       mutex_lock(&lcw_refcount_mutex);
+       if (++lcw_refcount == 1)
+               lcw_dispatch_start();
+       mutex_unlock(&lcw_refcount_mutex);
 
         /* Keep this working in case we enable them by default */
         if (lcw->lcw_state == LC_WATCHDOG_ENABLED) {
@@ -413,15 +413,15 @@ static void lcw_update_time(struct lc_watchdog *lcw, const char *message)
 
 static void lc_watchdog_del_pending(struct lc_watchdog *lcw)
 {
-        cfs_spin_lock_bh(&lcw->lcw_lock);
-        if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
-                cfs_spin_lock_bh(&lcw_pending_timers_lock);
-                cfs_list_del_init(&lcw->lcw_list);
-                lcw->lcw_refcount--; /* -1 ref for pending list */
-                cfs_spin_unlock_bh(&lcw_pending_timers_lock);
-        }
-
-        cfs_spin_unlock_bh(&lcw->lcw_lock);
+       spin_lock_bh(&lcw->lcw_lock);
+       if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
+               spin_lock_bh(&lcw_pending_timers_lock);
+               cfs_list_del_init(&lcw->lcw_list);
+               lcw->lcw_refcount--; /* -1 ref for pending list */
+               spin_unlock_bh(&lcw_pending_timers_lock);
+       }
+
+       spin_unlock_bh(&lcw->lcw_lock);
 }
 
 void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout)
@@ -466,27 +466,27 @@ void lc_watchdog_delete(struct lc_watchdog *lcw)
 
         lcw_update_time(lcw, "stopped");
 
-        cfs_spin_lock_bh(&lcw->lcw_lock);
-        cfs_spin_lock_bh(&lcw_pending_timers_lock);
-        if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
-                cfs_list_del_init(&lcw->lcw_list);
-                lcw->lcw_refcount--; /* -1 ref for pending list */
-        }
+       spin_lock_bh(&lcw->lcw_lock);
+       spin_lock_bh(&lcw_pending_timers_lock);
+       if (unlikely(!cfs_list_empty(&lcw->lcw_list))) {
+               cfs_list_del_init(&lcw->lcw_list);
+               lcw->lcw_refcount--; /* -1 ref for pending list */
+       }
 
-        lcw->lcw_refcount--; /* -1 ref for owner */
-        dead = lcw->lcw_refcount == 0;
-        cfs_spin_unlock_bh(&lcw_pending_timers_lock);
-        cfs_spin_unlock_bh(&lcw->lcw_lock);
+       lcw->lcw_refcount--; /* -1 ref for owner */
+       dead = lcw->lcw_refcount == 0;
+       spin_unlock_bh(&lcw_pending_timers_lock);
+       spin_unlock_bh(&lcw->lcw_lock);
 
-        if (dead)
-                LIBCFS_FREE(lcw, sizeof(*lcw));
+       if (dead)
+               LIBCFS_FREE(lcw, sizeof(*lcw));
 
-        cfs_mutex_lock(&lcw_refcount_mutex);
-        if (--lcw_refcount == 0)
-                lcw_dispatch_stop();
-        cfs_mutex_unlock(&lcw_refcount_mutex);
+       mutex_lock(&lcw_refcount_mutex);
+       if (--lcw_refcount == 0)
+               lcw_dispatch_stop();
+       mutex_unlock(&lcw_refcount_mutex);
 
-        EXIT;
+       EXIT;
 }
 EXPORT_SYMBOL(lc_watchdog_delete);
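
A hypothetical service-loop pattern over these exports; lc_watchdog_add()'s trailing arguments are truncated in the hunk above, so the NULL callback and data here are assumptions:

    struct lc_watchdog *lcw = lc_watchdog_add(30, NULL, NULL);

    while (more_requests()) {              /* hypothetical loop condition */
            /* ... service one request ... */
            lc_watchdog_touch(lcw, 30);    /* re-arm before it expires */
    }
    lc_watchdog_delete(lcw);               /* drop the owner reference */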
 
index 16b47b8..68d1740 100644 (file)
@@ -204,7 +204,7 @@ task_manager_notify(
     PLIST_ENTRY ListEntry = NULL; 
     PTASK_SLOT  TaskSlot  = NULL;
 
-    cfs_spin_lock(&(cfs_win_task_manger.Lock));
+       spin_lock(&(cfs_win_task_manger.Lock));
 
     ListEntry = cfs_win_task_manger.TaskList.Flink;
     while (ListEntry != (&(cfs_win_task_manger.TaskList))) {
@@ -226,7 +226,7 @@ task_manager_notify(
         ListEntry = ListEntry->Flink;
     }
 
-    cfs_spin_unlock(&(cfs_win_task_manger.Lock));
+       spin_unlock(&(cfs_win_task_manger.Lock));
 }
 
 int
@@ -239,7 +239,7 @@ init_task_manager()
     cfs_win_task_manger.Magic = TASKMAN_MAGIC;
 
     /* initialize the spinlock protection */
-    cfs_spin_lock_init(&cfs_win_task_manger.Lock);
+       spin_lock_init(&cfs_win_task_manger.Lock);
 
     /* create slab memory cache */
     cfs_win_task_manger.slab = cfs_mem_cache_create(
@@ -285,7 +285,7 @@ cleanup_task_manager()
     }
 
     /* cleanup all the taskslots attached to the list */
-    cfs_spin_lock(&(cfs_win_task_manger.Lock));
+       spin_lock(&(cfs_win_task_manger.Lock));
 
     while (!IsListEmpty(&(cfs_win_task_manger.TaskList))) {
 
@@ -296,7 +296,7 @@ cleanup_task_manager()
         cleanup_task_slot(TaskSlot);
     }
 
-    cfs_spin_unlock(&cfs_win_task_manger.Lock);
+       spin_unlock(&cfs_win_task_manger.Lock);
 
     /* destroy the taskslot cache slab */
     cfs_mem_cache_destroy(cfs_win_task_manger.slab);
@@ -319,7 +319,7 @@ cfs_current()
     PLIST_ENTRY ListEntry = NULL; 
     PTASK_SLOT  TaskSlot  = NULL;
 
-    cfs_spin_lock(&(cfs_win_task_manger.Lock));
+       spin_lock(&(cfs_win_task_manger.Lock));
 
     ListEntry = cfs_win_task_manger.TaskList.Flink;
     while (ListEntry != (&(cfs_win_task_manger.TaskList))) {
@@ -415,7 +415,7 @@ cfs_current()
 
 errorout:
 
-    cfs_spin_unlock(&(cfs_win_task_manger.Lock));
+       spin_unlock(&(cfs_win_task_manger.Lock));
 
     if (!TaskSlot) {
         cfs_enter_debugger();
index 0d79b53..6d25d1b 100644 (file)
@@ -286,16 +286,15 @@ int FASTCALL cfs_atomic_sub_return(int i, cfs_atomic_t *v)
        return cfs_atomic_add_return(-i, v);
 }
 
-int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, cfs_spinlock_t *lock)
+int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, spinlock_t *lock)
 {
-    if (cfs_atomic_read(v) != 1) {
-        return 0;
-    }
+       /* no early return on a racy read: the caller's reference
+        * must still be dropped, so always decrement under the lock */
 
-       cfs_spin_lock(lock);
+       spin_lock(lock);
        if (cfs_atomic_dec_and_test(v))
                return 1;
-       cfs_spin_unlock(lock);
+       spin_unlock(lock);
        return 0;
 }
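
The helper backs the classic "drop the last reference under the list lock" idiom; a hypothetical caller (`obj` and `obj_lock` are illustrative):

    if (cfs_atomic_dec_and_lock(&obj->ref, &obj_lock)) {
            /* count reached zero and obj_lock is held */
            cfs_list_del(&obj->link);
            spin_unlock(&obj_lock);
            cfs_free(obj);
    }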
 
@@ -306,19 +305,19 @@ int FASTCALL cfs_atomic_dec_and_lock(cfs_atomic_t *v, cfs_spinlock_t *lock)
 
 
 void
-cfs_rwlock_init(cfs_rwlock_t * rwlock)
+rwlock_init(rwlock_t *rwlock)
 {
-    cfs_spin_lock_init(&rwlock->guard);
-    rwlock->count = 0;
+       spin_lock_init(&rwlock->guard);
+       rwlock->count = 0;
 }
 
 void
-cfs_rwlock_fini(cfs_rwlock_t * rwlock)
+cfs_rwlock_fini(rwlock_t *rwlock)
 {
 }
 
 void
-cfs_read_lock(cfs_rwlock_t * rwlock)
+read_lock(rwlock_t *rwlock)
 {
     cfs_task_t * task = cfs_current();
     PTASK_SLOT   slot = NULL;
@@ -334,19 +333,19 @@ cfs_read_lock(cfs_rwlock_t * rwlock)
    
     slot->irql = KeRaiseIrqlToDpcLevel();
 
-    while (TRUE) {
-           cfs_spin_lock(&rwlock->guard);
-        if (rwlock->count >= 0)
-            break;
-        cfs_spin_unlock(&rwlock->guard);
-    }
+       while (TRUE) {
+               spin_lock(&rwlock->guard);
+               if (rwlock->count >= 0)
+                       break;
+               spin_unlock(&rwlock->guard);
+       }
 
        rwlock->count++;
-       cfs_spin_unlock(&rwlock->guard);
+       spin_unlock(&rwlock->guard);
 }
 
 void
-cfs_read_unlock(cfs_rwlock_t * rwlock)
+read_unlock(rwlock_t *rwlock)
 {
     cfs_task_t * task = cfs_current();
     PTASK_SLOT   slot = NULL;
@@ -359,20 +358,19 @@ cfs_read_unlock(cfs_rwlock_t * rwlock)
 
     slot = CONTAINING_RECORD(task, TASK_SLOT, task);
     ASSERT(slot->Magic == TASKSLT_MAGIC);
-   
-    cfs_spin_lock(&rwlock->guard);
+
+       spin_lock(&rwlock->guard);
        ASSERT(rwlock->count > 0);
-    rwlock->count--;
-    if (rwlock < 0) {
-        cfs_enter_debugger();
-    }
-       cfs_spin_unlock(&rwlock->guard);
+       rwlock->count--;
+       if (rwlock->count < 0)
+               cfs_enter_debugger();
+       spin_unlock(&rwlock->guard);
 
-    KeLowerIrql(slot->irql);
+       KeLowerIrql(slot->irql);
 }
 
 void
-cfs_write_lock(cfs_rwlock_t * rwlock)
+write_lock(rwlock_t *rwlock)
 {
     cfs_task_t * task = cfs_current();
     PTASK_SLOT   slot = NULL;
@@ -388,19 +386,19 @@ cfs_write_lock(cfs_rwlock_t * rwlock)
    
     slot->irql = KeRaiseIrqlToDpcLevel();
 
-    while (TRUE) {
-           cfs_spin_lock(&rwlock->guard);
-        if (rwlock->count == 0)
-            break;
-        cfs_spin_unlock(&rwlock->guard);
-    }
+       while (TRUE) {
+               spin_lock(&rwlock->guard);
+               if (rwlock->count == 0)
+                       break;
+               spin_unlock(&rwlock->guard);
+       }
 
        rwlock->count = -1;
-       cfs_spin_unlock(&rwlock->guard);
+       spin_unlock(&rwlock->guard);
 }
 
 void
-cfs_write_unlock(cfs_rwlock_t * rwlock)
+write_unlock(rwlock_t *rwlock)
 {
     cfs_task_t * task = cfs_current();
     PTASK_SLOT   slot = NULL;
@@ -413,11 +411,11 @@ cfs_write_unlock(cfs_rwlock_t * rwlock)
 
     slot = CONTAINING_RECORD(task, TASK_SLOT, task);
     ASSERT(slot->Magic == TASKSLT_MAGIC);
-   
-    cfs_spin_lock(&rwlock->guard);
+
+       spin_lock(&rwlock->guard);
        ASSERT(rwlock->count == -1);
-    rwlock->count = 0;
-       cfs_spin_unlock(&rwlock->guard);
+       rwlock->count = 0;
+       spin_unlock(&rwlock->guard);
 
-    KeLowerIrql(slot->irql);
+       KeLowerIrql(slot->irql);
 }
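
For orientation: the emulation encodes the lock state in `count` (0 free, N > 0 that many readers, -1 a writer), and both paths spin on the `guard` spinlock at raised IRQL. A hypothetical reader:

    read_lock(&rw);    /* spins until count >= 0, then increments it */
    /* shared section: other readers may enter, writers may not */
    read_unlock(&rw);  /* decrements count and lowers IRQL */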
index 0792233..b64458f 100644 (file)
@@ -54,7 +54,7 @@ cfs_page_t * virt_to_page(void * addr)
     pg->addr = (void *)((__u64)addr & (~((__u64)PAGE_SIZE-1)));
     pg->mapping = addr;
     cfs_atomic_set(&pg->count, 1);
-    cfs_set_bit(PG_virt, &(pg->flags));
+       set_bit(PG_virt, &(pg->flags));
     cfs_enter_debugger();
     return pg;
 }
@@ -123,7 +123,7 @@ void cfs_free_page(cfs_page_t *pg)
     ASSERT(pg->addr  != NULL);
     ASSERT(cfs_atomic_read(&pg->count) <= 1);
 
-    if (!cfs_test_bit(PG_virt, &pg->flags)) {
+       if (!test_bit(PG_virt, &pg->flags)) {
         cfs_mem_cache_free(cfs_page_p_slab, pg->addr);
         cfs_atomic_dec(&libcfs_total_pages);
     } else {
@@ -374,7 +374,7 @@ void cfs_mem_cache_free(cfs_mem_cache_t * kmc, void * buf)
     ExFreeToNPagedLookasideList(&(kmc->npll), buf);
 }
 
-cfs_spinlock_t  shrinker_guard = {0};
+spinlock_t  shrinker_guard = {0};
 CFS_LIST_HEAD(shrinker_hdr);
 cfs_timer_t shrinker_timer = {0};
 
@@ -382,22 +382,22 @@ struct cfs_shrinker * cfs_set_shrinker(int seeks, shrink_callback cb)
 {
     struct cfs_shrinker * s = (struct cfs_shrinker *)
         cfs_alloc(sizeof(struct cfs_shrinker), CFS_ALLOC_ZERO);
-    if (s) {
-        s->cb = cb;
-        s->seeks = seeks;
-        s->nr = 2;
-        cfs_spin_lock(&shrinker_guard);
-        cfs_list_add(&s->list, &shrinker_hdr); 
-        cfs_spin_unlock(&shrinker_guard);
-    }
-
-    return s;
+       if (s) {
+               s->cb = cb;
+               s->seeks = seeks;
+               s->nr = 2;
+               spin_lock(&shrinker_guard);
+               cfs_list_add(&s->list, &shrinker_hdr);
+               spin_unlock(&shrinker_guard);
+       }
+
+       return s;
 }
 
 void cfs_remove_shrinker(struct cfs_shrinker *s)
 {
-    struct cfs_shrinker *tmp;
-    cfs_spin_lock(&shrinker_guard);
+       struct cfs_shrinker *tmp;
+       spin_lock(&shrinker_guard);
 #if TRUE
     cfs_list_for_each_entry_typed(tmp, &shrinker_hdr,
                                   struct cfs_shrinker, list) {
@@ -409,22 +409,22 @@ void cfs_remove_shrinker(struct cfs_shrinker *s)
 #else
     cfs_list_del(&s->list);
 #endif
-    cfs_spin_unlock(&shrinker_guard);
-    cfs_free(s);
+       spin_unlock(&shrinker_guard);
+       cfs_free(s);
 }
 
 /* time ut test proc */
 void shrinker_timer_proc(ulong_ptr_t arg)
 {
-    struct cfs_shrinker *s;
-    cfs_spin_lock(&shrinker_guard);
-
-    cfs_list_for_each_entry_typed(s, &shrinker_hdr,
-                                  struct cfs_shrinker, list) {
-            s->cb(s->nr, __GFP_FS);
-    }
-    cfs_spin_unlock(&shrinker_guard);
-    cfs_timer_arm(&shrinker_timer, 300);
+       struct cfs_shrinker *s;
+       spin_lock(&shrinker_guard);
+
+       cfs_list_for_each_entry_typed(s, &shrinker_hdr,
+                                     struct cfs_shrinker, list) {
+               s->cb(s->nr, __GFP_FS);
+       }
+       spin_unlock(&shrinker_guard);
+       cfs_timer_arm(&shrinker_timer, 300);
 }
 
 int start_shrinker_timer()
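
For context, here is a hypothetical client of the cfs_shrinker API touched above. The shrink_callback signature is inferred from the s->cb(s->nr, __GFP_FS) call site and is an assumption, as is the seeks value of 2; the usual kernel headers are presumed.

	static int my_cache_shrink(int nr, unsigned int gfp_mask)
	{
		/* try to release up to nr cached objects; report what remains */
		return 0;
	}

	static struct cfs_shrinker *my_shrinker;

	static void my_cache_setup(void)
	{
		my_shrinker = cfs_set_shrinker(2 /* seeks */, my_cache_shrink);
	}

	static void my_cache_teardown(void)
	{
		if (my_shrinker != NULL)
			cfs_remove_shrinker(my_shrinker);
	}
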
index 330b3b6..faf5f52 100644 (file)
@@ -148,7 +148,7 @@ int cfs_create_thread(int (*func)(void *), void *arg, unsigned long flag)
  */
 
 
-static CFS_DECLARE_RWSEM(cfs_symbol_lock);
+static DECLARE_RWSEM(cfs_symbol_lock);
 CFS_LIST_HEAD(cfs_symbol_list);
 
 int libcfs_is_mp_system = FALSE;
@@ -174,7 +174,7 @@ cfs_symbol_get(const char *name)
     cfs_list_t              *walker;
     struct cfs_symbol       *sym = NULL;
 
-    cfs_down_read(&cfs_symbol_lock);
+       down_read(&cfs_symbol_lock);
     cfs_list_for_each(walker, &cfs_symbol_list) {
         sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
         if (!strcmp(sym->name, name)) {
@@ -182,7 +182,7 @@ cfs_symbol_get(const char *name)
             break;
         }
     }
-    cfs_up_read(&cfs_symbol_lock);
+       up_read(&cfs_symbol_lock);
 
     if (sym != NULL)
         return sym->value;
@@ -210,7 +210,7 @@ cfs_symbol_put(const char *name)
     cfs_list_t              *walker;
     struct cfs_symbol       *sym = NULL;
 
-    cfs_down_read(&cfs_symbol_lock);
+       down_read(&cfs_symbol_lock);
     cfs_list_for_each(walker, &cfs_symbol_list) {
         sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
         if (!strcmp(sym->name, name)) {
@@ -219,7 +219,7 @@ cfs_symbol_put(const char *name)
             break;
         }
     }
-    cfs_up_read(&cfs_symbol_lock);
+       up_read(&cfs_symbol_lock);
 
     LASSERT(sym != NULL);
 }
@@ -257,17 +257,17 @@ cfs_symbol_register(const char *name, const void *value)
     new->ref = 0;
     CFS_INIT_LIST_HEAD(&new->sym_list);
 
-    cfs_down_write(&cfs_symbol_lock);
-    cfs_list_for_each(walker, &cfs_symbol_list) {
-        sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
-        if (!strcmp(sym->name, name)) {
-            cfs_up_write(&cfs_symbol_lock);
-            cfs_free(new);
-            return 0; // alreay registerred
-        }
-    }
-    cfs_list_add_tail(&new->sym_list, &cfs_symbol_list);
-    cfs_up_write(&cfs_symbol_lock);
+       down_write(&cfs_symbol_lock);
+       cfs_list_for_each(walker, &cfs_symbol_list) {
+               sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
+               if (!strcmp(sym->name, name)) {
+                       up_write(&cfs_symbol_lock);
+                       cfs_free(new);
+                       return 0; /* already registered */
+               }
+       }
+       cfs_list_add_tail(&new->sym_list, &cfs_symbol_list);
+       up_write(&cfs_symbol_lock);
 
     return 0;
 }
@@ -293,7 +293,7 @@ cfs_symbol_unregister(const char *name)
     cfs_list_t              *nxt;
     struct cfs_symbol       *sym = NULL;
 
-    cfs_down_write(&cfs_symbol_lock);
+       down_write(&cfs_symbol_lock);
     cfs_list_for_each_safe(walker, nxt, &cfs_symbol_list) {
         sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
         if (!strcmp(sym->name, name)) {
@@ -303,7 +303,7 @@ cfs_symbol_unregister(const char *name)
             break;
         }
     }
-    cfs_up_write(&cfs_symbol_lock);
+       up_write(&cfs_symbol_lock);
 }
 
 /*
@@ -326,15 +326,15 @@ cfs_symbol_clean()
     cfs_list_t          *walker;
     struct cfs_symbol   *sym = NULL;
 
-    cfs_down_write(&cfs_symbol_lock);
-    cfs_list_for_each(walker, &cfs_symbol_list) {
-        sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
-        LASSERT(sym->ref == 0);
-        cfs_list_del (&sym->sym_list);
-        cfs_free(sym);
-    }
-    cfs_up_write(&cfs_symbol_lock);
-    return;
+       down_write(&cfs_symbol_lock);
+       cfs_list_for_each(walker, &cfs_symbol_list) {
+               sym = cfs_list_entry (walker, struct cfs_symbol, sym_list);
+               LASSERT(sym->ref == 0);
+               cfs_list_del (&sym->sym_list);
+               cfs_free(sym);
+       }
+       up_write(&cfs_symbol_lock);
+       return;
 }
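
The symbol-table conversion above preserves the classic reader/writer split: every lookup nests under down_read()/up_read(), every mutation under down_write()/up_write(). The same shape, distilled into a self-contained sketch with a hypothetical table:

	#include <linux/rwsem.h>
	#include <linux/list.h>
	#include <linux/string.h>

	struct entry {
		struct list_head	 list;
		const char		*name;
		void			*value;
	};

	static DECLARE_RWSEM(tbl_lock);
	static LIST_HEAD(tbl);

	static void *tbl_lookup(const char *name)
	{
		struct entry *e;
		void *val = NULL;

		down_read(&tbl_lock);		/* concurrent lookups allowed */
		list_for_each_entry(e, &tbl, list) {
			if (strcmp(e->name, name) == 0) {
				val = e->value;
				break;
			}
		}
		up_read(&tbl_lock);
		return val;
	}

	static void tbl_insert(struct entry *e)
	{
		down_write(&tbl_lock);		/* excludes readers and writers */
		list_add_tail(&e->list, &tbl);
		up_write(&tbl_lock);
	}
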
 
 
@@ -761,16 +761,16 @@ void cfs_libc_init();
 int
 libcfs_arch_init(void)
 {
-    int         rc;
+       int             rc;
+       spinlock_t      lock;
 
-    cfs_spinlock_t  lock;
-    /* Workground to check the system is MP build or UP build */
-    cfs_spin_lock_init(&lock);
-    cfs_spin_lock(&lock);
-    libcfs_is_mp_system = (int)lock.lock;
-    /* MP build system: it's a real spin, for UP build system, it
-       only raises the IRQL to DISPATCH_LEVEL */
-    cfs_spin_unlock(&lock);
+       /* Workaround to check whether the system is an MP or UP build */
+       spin_lock_init(&lock);
+       spin_lock(&lock);
+       libcfs_is_mp_system = (int)lock.lock;
+       /* on an MP build this is a real spin; on a UP build it only
+        * raises the IRQL to DISPATCH_LEVEL */
+       spin_unlock(&lock);
 
     /* initialize libc routines (confliction between libcnptr.lib
        and kernel ntoskrnl.lib) */
index 6f46d09..e86413c 100644 (file)
@@ -70,15 +70,15 @@ cfs_sysctl_table_header_t       root_table_header;
 /* The global lock to protect all the access */
 
 #if LIBCFS_PROCFS_SPINLOCK
-cfs_spinlock_t                  proc_fs_lock;
+spinlock_t                     proc_fs_lock;
 
-#define INIT_PROCFS_LOCK()      cfs_spin_lock_init(&proc_fs_lock)
-#define LOCK_PROCFS()           cfs_spin_lock(&proc_fs_lock)
-#define UNLOCK_PROCFS()         cfs_spin_unlock(&proc_fs_lock)
+#define INIT_PROCFS_LOCK()     spin_lock_init(&proc_fs_lock)
+#define LOCK_PROCFS()          spin_lock(&proc_fs_lock)
+#define UNLOCK_PROCFS()                spin_unlock(&proc_fs_lock)
 
 #else
 
-cfs_mutex_t                     proc_fs_lock;
+struct mutex                           proc_fs_lock;
 
 #define INIT_PROCFS_LOCK()      cfs_init_mutex(&proc_fs_lock)
 #define LOCK_PROCFS()           cfs_mutex_down(&proc_fs_lock)
@@ -1836,7 +1836,7 @@ int seq_open(struct file *file, const struct seq_operations *op)
                file->private_data = p;
        }
        memset(p, 0, sizeof(*p));
-       cfs_mutex_init(&p->lock);
+       mutex_init(&p->lock);
        p->op = op;
 
        /*
@@ -1870,7 +1870,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
        void *p;
        int err = 0;
 
-       cfs_mutex_lock(&m->lock);
+       mutex_lock(&m->lock);
        /*
         * seq_file->op->..m_start/m_stop/m_next may do special actions
         * or optimisations based on the file->f_version, so we want to
@@ -1963,7 +1963,7 @@ Done:
        else
                *ppos += copied;
        file->f_version = m->version;
-       cfs_mutex_unlock(&m->lock);
+       mutex_unlock(&m->lock);
        return copied;
 Enomem:
        err = -ENOMEM;
@@ -2040,7 +2040,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int origin)
        struct seq_file *m = (struct seq_file *)file->private_data;
        long long retval = -EINVAL;
 
-       cfs_mutex_lock(&m->lock);
+       mutex_lock(&m->lock);
        m->version = file->f_version;
        switch (origin) {
                case 1:
@@ -2064,7 +2064,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int origin)
                        }
        }
        file->f_version = m->version;
-       cfs_mutex_unlock(&m->lock);
+       mutex_unlock(&m->lock);
        return retval;
 }
 EXPORT_SYMBOL(seq_lseek);
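
The seq_file changes above are the pattern for the whole patch: a struct mutex member replaces cfs_mutex_t, mutex_init() replaces cfs_mutex_init(), and each entry point brackets its critical section with mutex_lock()/mutex_unlock(). A minimal sketch of the same shape, using a hypothetical object:

	#include <linux/mutex.h>

	struct obj {
		struct mutex	lock;	/* was cfs_mutex_t */
		long long	pos;
	};

	static void obj_init(struct obj *o)
	{
		mutex_init(&o->lock);	/* was cfs_mutex_init() */
		o->pos = 0;
	}

	static long long obj_seek(struct obj *o, long long off)
	{
		long long ret;

		mutex_lock(&o->lock);	/* was cfs_mutex_lock() */
		o->pos = off;
		ret = o->pos;
		mutex_unlock(&o->lock);	/* was cfs_mutex_unlock() */
		return ret;
	}
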
index fc62f39..dc4a301 100644 (file)
@@ -60,7 +60,7 @@ void cfs_waitq_init(cfs_waitq_t *waitq)
     waitq->magic = CFS_WAITQ_MAGIC;
     waitq->flags = 0;
     CFS_INIT_LIST_HEAD(&(waitq->waiters));
-    cfs_spin_lock_init(&(waitq->guard));
+       spin_lock_init(&(waitq->guard));
 }
 
 /*
@@ -169,7 +169,7 @@ void cfs_waitq_add_internal(cfs_waitq_t *waitq,
     LASSERT(link->magic == CFS_WAITLINK_MAGIC);
     LASSERT(waitqid < CFS_WAITQ_CHANNELS);
 
-    cfs_spin_lock(&(waitq->guard));
+       spin_lock(&(waitq->guard));
     LASSERT(link->waitq[waitqid].waitq == NULL);
     link->waitq[waitqid].waitq = waitq;
     if (link->flags & CFS_WAITQ_EXCLUSIVE) {
@@ -177,7 +177,7 @@ void cfs_waitq_add_internal(cfs_waitq_t *waitq,
     } else {
         cfs_list_add(&link->waitq[waitqid].link, &waitq->waiters);
     }
-    cfs_spin_unlock(&(waitq->guard));
+       spin_unlock(&(waitq->guard));
 }
 /*
  * cfs_waitq_add
@@ -254,7 +254,7 @@ void cfs_waitq_del( cfs_waitq_t *waitq,
     LASSERT(waitq->magic == CFS_WAITQ_MAGIC);
     LASSERT(link->magic == CFS_WAITLINK_MAGIC);
 
-    cfs_spin_lock(&(waitq->guard));
+       spin_lock(&(waitq->guard));
 
     for (i=0; i < CFS_WAITQ_CHANNELS; i++) {
         if (link->waitq[i].waitq == waitq)
@@ -268,7 +268,7 @@ void cfs_waitq_del( cfs_waitq_t *waitq,
         cfs_enter_debugger();
     }
 
-    cfs_spin_unlock(&(waitq->guard));
+       spin_unlock(&(waitq->guard));
 }
 
 /*
@@ -319,7 +319,7 @@ void cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
     LASSERT(waitq != NULL);
     LASSERT(waitq->magic == CFS_WAITQ_MAGIC);
 
-    cfs_spin_lock(&waitq->guard);
+       spin_lock(&waitq->guard);
     cfs_list_for_each_entry_typed(scan, &waitq->waiters, 
                             cfs_waitlink_channel_t,
                             link) {
@@ -337,8 +337,8 @@ void cfs_waitq_signal_nr(cfs_waitq_t *waitq, int nr)
             break;
     }
 
-    cfs_spin_unlock(&waitq->guard);
-    return;
+       spin_unlock(&waitq->guard);
+       return;
 }
 
 /*
index c784f6a..293b630 100644 (file)
@@ -346,7 +346,7 @@ KsAllocateKsTsdu()
 {
     PKS_TSDU    KsTsdu = NULL;
 
-    cfs_spin_lock(&(ks_data.ksnd_tsdu_lock));
+       spin_lock(&(ks_data.ksnd_tsdu_lock));
 
     if (!cfs_list_empty (&(ks_data.ksnd_freetsdus))) {
 
@@ -362,7 +362,7 @@ KsAllocateKsTsdu()
                         ks_data.ksnd_tsdu_slab, 0);
     }
 
-    cfs_spin_unlock(&(ks_data.ksnd_tsdu_lock));
+       spin_unlock(&(ks_data.ksnd_tsdu_lock));
 
     if (NULL != KsTsdu) {
         RtlZeroMemory(KsTsdu, ks_data.ksnd_tsdu_size);
@@ -415,14 +415,14 @@ KsPutKsTsdu(
     PKS_TSDU  KsTsdu
     )
 {
-    cfs_spin_lock(&(ks_data.ksnd_tsdu_lock));
-    if (ks_data.ksnd_nfreetsdus > 128) {
-        KsFreeKsTsdu(KsTsdu);
-    } else {
-        cfs_list_add_tail( &(KsTsdu->Link), &(ks_data.ksnd_freetsdus));
-        ks_data.ksnd_nfreetsdus++;
-    }
-    cfs_spin_unlock(&(ks_data.ksnd_tsdu_lock));
+       spin_lock(&(ks_data.ksnd_tsdu_lock));
+       if (ks_data.ksnd_nfreetsdus > 128) {
+               KsFreeKsTsdu(KsTsdu);
+       } else {
+               cfs_list_add_tail( &(KsTsdu->Link), &(ks_data.ksnd_freetsdus));
+               ks_data.ksnd_nfreetsdus++;
+       }
+       spin_unlock(&(ks_data.ksnd_tsdu_lock));
 }
 
 /* with tconn lock acquired */
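
KsPutKsTsdu() above is a capped free list: buffers go back onto a list until a threshold is reached, beyond which they are freed outright. The same logic in generic, self-contained form; the types and the 128 threshold are illustrative only:

	#include <linux/spinlock.h>
	#include <linux/list.h>
	#include <linux/slab.h>

	#define POOL_MAX	128

	struct buf {
		struct list_head link;
	};

	static DEFINE_SPINLOCK(pool_lock);
	static LIST_HEAD(pool_free);
	static int pool_nfree;

	static void buf_put(struct buf *b)
	{
		spin_lock(&pool_lock);
		if (pool_nfree > POOL_MAX) {
			kfree(b);			/* pool full: free outright */
		} else {
			list_add_tail(&b->link, &pool_free);
			pool_nfree++;
		}
		spin_unlock(&pool_lock);
	}
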
@@ -1282,7 +1282,7 @@ KsInitializeKsTsduMgr(
     TsduMgr->NumOfTsdu  = 0;
     TsduMgr->TotalBytes = 0;
 
-    cfs_spin_lock_init(&TsduMgr->Lock);
+       spin_lock_init(&TsduMgr->Lock);
 }
 
 
@@ -2953,7 +2953,7 @@ KsAcceptCompletionRoutine(
 
     LASSERT(child->kstc_type == kstt_child);
 
-    cfs_spin_lock(&(child->kstc_lock));
+       spin_lock(&(child->kstc_lock));
 
     LASSERT(parent->kstc_state == ksts_listening);
     LASSERT(child->kstc_state == ksts_connecting);
@@ -2971,7 +2971,7 @@ KsAcceptCompletionRoutine(
             FALSE
             );
 
-        cfs_spin_unlock(&(child->kstc_lock));
+       spin_unlock(&(child->kstc_lock));
 
         KsPrint((2, "KsAcceptCompletionRoutine: signal parent: %p (child: %p)\n",
                     parent, child));
@@ -2983,7 +2983,7 @@ KsAcceptCompletionRoutine(
         child->child.kstc_busy = FALSE;
         child->kstc_state = ksts_associated;
 
-        cfs_spin_unlock(&(child->kstc_lock));
+       spin_unlock(&(child->kstc_lock));
     }
 
     /* now free the Irp */
@@ -3001,7 +3001,7 @@ KsSearchIpAddress(PUNICODE_STRING  DeviceName)
     ks_addr_slot_t * slot = NULL;
     PLIST_ENTRY      list = NULL;
 
-    cfs_spin_lock(&ks_data.ksnd_addrs_lock);
+       spin_lock(&ks_data.ksnd_addrs_lock);
 
     list = ks_data.ksnd_addrs_list.Flink;
     while (list != &ks_data.ksnd_addrs_list) {
@@ -3016,15 +3016,15 @@ KsSearchIpAddress(PUNICODE_STRING  DeviceName)
         slot = NULL;
     }
 
-    cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
+       spin_unlock(&ks_data.ksnd_addrs_lock);
 
-    return slot;
+       return slot;
 }
 
 void
 KsCleanupIpAddresses()
 {
-    cfs_spin_lock(&ks_data.ksnd_addrs_lock);
+       spin_lock(&ks_data.ksnd_addrs_lock);
 
     while (!IsListEmpty(&ks_data.ksnd_addrs_list)) {
 
@@ -3038,7 +3038,7 @@ KsCleanupIpAddresses()
     }
 
     cfs_assert(ks_data.ksnd_naddrs == 0);
-    cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
+       spin_unlock(&ks_data.ksnd_addrs_lock);
 }
 
 VOID
@@ -3081,7 +3081,7 @@ KsAddAddressHandler(
 
             slot = cfs_alloc(sizeof(ks_addr_slot_t) + DeviceName->Length, CFS_ALLOC_ZERO);
             if (slot != NULL) {
-                cfs_spin_lock(&ks_data.ksnd_addrs_lock);
+               spin_lock(&ks_data.ksnd_addrs_lock);
                 InsertTailList(&ks_data.ksnd_addrs_list, &slot->link);
                 sprintf(slot->iface, "eth%d", ks_data.ksnd_naddrs++);
                 slot->ip_addr = ntohl(IpAddress->in_addr);
@@ -3091,7 +3091,7 @@ KsAddAddressHandler(
                 slot->devname.Length = DeviceName->Length;
                 slot->devname.MaximumLength = DeviceName->Length + sizeof(WCHAR);
                 slot->devname.Buffer = slot->buffer;
-                cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
+               spin_unlock(&ks_data.ksnd_addrs_lock);
 
                 KsPrint((0, "KsAddAddressHandle: %s added: ip=%xh(%d.%d.%d.%d)\n",
                             slot->iface, IpAddress->in_addr,
@@ -3142,7 +3142,7 @@ KsRegisterPnpHandlers()
 
     /* initialize the global ks_data members */
     RtlInitUnicodeString(&ks_data.ksnd_client_name, TDILND_MODULE_NAME);
-    cfs_spin_lock_init(&ks_data.ksnd_addrs_lock);
+       spin_lock_init(&ks_data.ksnd_addrs_lock);
     InitializeListHead(&ks_data.ksnd_addrs_list);
 
     /* register the pnp handlers */
@@ -3209,15 +3209,15 @@ KsGetVacancyBacklog(
 
         cfs_list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
             child = cfs_list_entry (tmp, ks_tconn_t, child.kstc_link);
-            cfs_spin_lock(&(child->kstc_lock));
+           spin_lock(&(child->kstc_lock));
 
             if (!child->child.kstc_busy) {
                 LASSERT(child->kstc_state == ksts_associated);
                 child->child.kstc_busy = TRUE;
-                cfs_spin_unlock(&(child->kstc_lock));
+               spin_unlock(&(child->kstc_lock));
                 break;
             } else {
-                cfs_spin_unlock(&(child->kstc_lock));
+               spin_unlock(&(child->kstc_lock));
                 child = NULL;
             }
         }
@@ -3273,7 +3273,7 @@ KsConnectEventHandler(
 
     LASSERT(parent->kstc_type == kstt_listener);
 
-    cfs_spin_lock(&(parent->kstc_lock));
+       spin_lock(&(parent->kstc_lock));
 
     if (parent->kstc_state == ksts_listening) {
 
@@ -3311,11 +3311,11 @@ KsConnectEventHandler(
 
         if (child) {
 
-            cfs_spin_lock(&(child->kstc_lock));
+           spin_lock(&(child->kstc_lock));
             child->child.kstc_info.ConnectionInfo = ConnectionInfo;
             child->child.kstc_info.Remote = ConnectionInfo->RemoteAddress;
             child->kstc_state = ksts_connecting;
-            cfs_spin_unlock(&(child->kstc_lock));
+           spin_unlock(&(child->kstc_lock));
 
         } else {
 
@@ -3354,13 +3354,13 @@ KsConnectEventHandler(
         goto errorout;
     }
 
-    cfs_spin_unlock(&(parent->kstc_lock));
+       spin_unlock(&(parent->kstc_lock));
 
     return Status;
 
 errorout:
 
-    cfs_spin_unlock(&(parent->kstc_lock));
+       spin_unlock(&(parent->kstc_lock));
 
     *AcceptIrp = NULL;
     *ConnectionContext = NULL;
@@ -3436,10 +3436,10 @@ KsDisconnectHelper(PKS_DISCONNECT_WORKITEM WorkItem)
 
     KeSetEvent(&(WorkItem->Event), 0, FALSE);
 
-    cfs_spin_lock(&(tconn->kstc_lock));
-    cfs_clear_flag(tconn->kstc_flags, KS_TCONN_DISCONNECT_BUSY);
-    cfs_spin_unlock(&(tconn->kstc_lock));
-    ks_put_tconn(tconn);
+       spin_lock(&(tconn->kstc_lock));
+       cfs_clear_flag(tconn->kstc_flags, KS_TCONN_DISCONNECT_BUSY);
+       spin_unlock(&(tconn->kstc_lock));
+       ks_put_tconn(tconn);
 }
 
 
@@ -3485,7 +3485,7 @@ KsDisconnectEventHandler(
                  tconn, DisconnectFlags));
 
     ks_get_tconn(tconn);
-    cfs_spin_lock(&(tconn->kstc_lock));
+    spin_lock(&(tconn->kstc_lock));
 
     WorkItem = &(tconn->kstc_disconnect);
 
@@ -3518,7 +3518,7 @@ KsDisconnectEventHandler(
         }
     }
 
-    cfs_spin_unlock(&(tconn->kstc_lock));
+    spin_unlock(&(tconn->kstc_lock));
     ks_put_tconn(tconn);
 
     return  (Status);
@@ -4331,16 +4331,16 @@ ks_create_tconn()
                 tconn
             );
 
-        cfs_spin_lock_init(&(tconn->kstc_lock));
+       spin_lock_init(&(tconn->kstc_lock));
 
         ks_get_tconn(tconn);
-        cfs_spin_lock(&(ks_data.ksnd_tconn_lock));
+       spin_lock(&(ks_data.ksnd_tconn_lock));
 
         /* attach it to the global list in ks_data */
 
         cfs_list_add(&(tconn->kstc_list), &(ks_data.ksnd_tconns));
         ks_data.ksnd_ntconns++;
-        cfs_spin_unlock(&(ks_data.ksnd_tconn_lock));
+       spin_unlock(&(ks_data.ksnd_tconn_lock));
 
         tconn->kstc_rcv_wnd = tconn->kstc_snd_wnd = 0x10000;
     }
@@ -4368,7 +4368,7 @@ ks_free_tconn(ks_tconn_t * tconn)
 {
     LASSERT(cfs_atomic_read(&(tconn->kstc_refcount)) == 0);
 
-    cfs_spin_lock(&(ks_data.ksnd_tconn_lock));
+       spin_lock(&(ks_data.ksnd_tconn_lock));
 
     /* remove it from the global list */
     cfs_list_del(&tconn->kstc_list);
@@ -4379,7 +4379,7 @@ ks_free_tconn(ks_tconn_t * tconn)
     if (ks_data.ksnd_ntconns == 0) {
         cfs_wake_event(&ks_data.ksnd_tconn_exit);
     }
-    cfs_spin_unlock(&(ks_data.ksnd_tconn_lock));
+       spin_unlock(&(ks_data.ksnd_tconn_lock));
 
     /* free the structure memory */
     cfs_mem_cache_free(ks_data.ksnd_tconn_slab, tconn);
@@ -4534,13 +4534,13 @@ ks_put_tconn(
 {
     if (cfs_atomic_dec_and_test(&(tconn->kstc_refcount))) {
 
-        cfs_spin_lock(&(tconn->kstc_lock));
+       spin_lock(&(tconn->kstc_lock));
 
         if ( ( tconn->kstc_type == kstt_child ||
                tconn->kstc_type == kstt_sender ) &&
              ( tconn->kstc_state == ksts_connected ) ) {
 
-            cfs_spin_unlock(&(tconn->kstc_lock));
+           spin_unlock(&(tconn->kstc_lock));
 
             ks_abort_tconn(tconn);
 
@@ -4557,7 +4557,7 @@ ks_put_tconn(
                 cfs_set_flag(tconn->kstc_flags, KS_TCONN_DESTROY_BUSY);
             }
 
-            cfs_spin_unlock(&(tconn->kstc_lock));
+           spin_unlock(&(tconn->kstc_lock));
         }
     }
 }
@@ -4621,8 +4621,8 @@ ks_destroy_tconn(
                 tconn->kstc_addr.FileObject
                 );
 
-        cfs_spin_lock(&tconn->child.kstc_parent->kstc_lock);
-        cfs_spin_lock(&tconn->kstc_lock);
+       spin_lock(&tconn->child.kstc_parent->kstc_lock);
+       spin_lock(&tconn->kstc_lock);
 
         tconn->kstc_state = ksts_inited;
 
@@ -4646,8 +4646,8 @@ ks_destroy_tconn(
             tconn->child.kstc_queued = FALSE;
         }
 
-        cfs_spin_unlock(&tconn->kstc_lock);
-        cfs_spin_unlock(&tconn->child.kstc_parent->kstc_lock);
+       spin_unlock(&tconn->kstc_lock);
+       spin_unlock(&tconn->child.kstc_parent->kstc_lock);
 
         /* drop the reference of the parent tconn */
         ks_put_tconn(tconn->child.kstc_parent);
@@ -5222,7 +5222,7 @@ ks_build_tconn(
                     NULL
                     );
 
-    cfs_spin_lock(&(tconn->kstc_lock));
+       spin_lock(&(tconn->kstc_lock));
 
     if (NT_SUCCESS(status)) {
 
@@ -5233,7 +5233,7 @@ ks_build_tconn(
         tconn->sender.kstc_info.ConnectionInfo = ConnectionInfo;
         tconn->sender.kstc_info.Remote         = ConnectionInfo->RemoteAddress;
 
-        cfs_spin_unlock(&(tconn->kstc_lock));
+       spin_unlock(&(tconn->kstc_lock));
 
     } else {
 
@@ -5247,7 +5247,7 @@ ks_build_tconn(
         rc = cfs_error_code(status);
 
         tconn->kstc_state = ksts_associated;
-        cfs_spin_unlock(&(tconn->kstc_lock));
+       spin_unlock(&(tconn->kstc_lock));
 
         /* disassociate the connection and the address object;
            after cleanup, it's safe to set the state to abort ... */
@@ -5405,7 +5405,7 @@ ks_disconnect_tconn(
             cfs_enter_debugger();
         }
 
-        cfs_spin_lock(&(tconn->kstc_lock));
+       spin_lock(&(tconn->kstc_lock));
 
         /* cleanup the tsdumgr Lists */
         KsCleanupTsdu (tconn);
@@ -5422,7 +5422,7 @@ ks_disconnect_tconn(
         info->ConnectionInfo = NULL;
         info->Remote = NULL;
 
-        cfs_spin_unlock(&(tconn->kstc_lock));
+       spin_unlock(&(tconn->kstc_lock));
     }
 
     status = STATUS_SUCCESS;
@@ -5460,7 +5460,7 @@ ks_abort_tconn(
     WorkItem = &(tconn->kstc_disconnect);
 
     ks_get_tconn(tconn);
-    cfs_spin_lock(&(tconn->kstc_lock));
+       spin_lock(&(tconn->kstc_lock));
 
     if (tconn->kstc_state != ksts_connected) {
         ks_put_tconn(tconn);
@@ -5480,7 +5480,7 @@ ks_abort_tconn(
         }
     }
 
-    cfs_spin_unlock(&(tconn->kstc_lock));
+       spin_unlock(&(tconn->kstc_lock));
 }
 
 
@@ -5556,7 +5556,7 @@ KsQueueTdiEngine(ks_tconn_t * tconn, PKS_TSDUMGR TsduMgr)
     engs = &TsduMgr->Slot;
 
     if (!engs->queued) {
-        cfs_spin_lock(&engm->lock);
+       spin_lock(&engm->lock);
         if (!engs->queued) {
             cfs_list_add_tail(&engs->link, &engm->list);
             engs->queued = TRUE;
@@ -5565,7 +5565,7 @@ KsQueueTdiEngine(ks_tconn_t * tconn, PKS_TSDUMGR TsduMgr)
             engs->tsdumgr = TsduMgr;
             KeSetEvent(&(engm->start),0, FALSE);
         }
-        cfs_spin_unlock(&engm->lock);
+       spin_unlock(&engm->lock);
         KsPrint((4, "KsQueueTdiEngine: TsduMgr=%p is queued to engine %p\n",
                     TsduMgr, engm));
     }
@@ -5582,7 +5582,7 @@ KsRemoveTdiEngine(PKS_TSDUMGR TsduMgr)
     if (engs->queued) {
         engm = engs->emgr;
         LASSERT(engm != NULL);
-        cfs_spin_lock(&engm->lock);
+       spin_lock(&engm->lock);
         if (engs->queued) {
             cfs_list_del(&engs->link);
             engs->queued = FALSE;
@@ -5590,7 +5590,7 @@ KsRemoveTdiEngine(PKS_TSDUMGR TsduMgr)
             engs->emgr = NULL;
             engs->tsdumgr = NULL;
         }
-        cfs_spin_unlock(&engm->lock);
+       spin_unlock(&engm->lock);
          KsPrint((4, "KsRemoveTdiEngine: TsduMgr %p is removed from engine %p\n",
                     TsduMgr, engm));
     }
@@ -5807,9 +5807,9 @@ KsDeliveryEngineThread(void * context)
 
         cfs_wait_event_internal(&engm->start, 0);
 
-        cfs_spin_lock(&engm->lock);
+       spin_lock(&engm->lock);
         if (cfs_list_empty(&engm->list)) {
-            cfs_spin_unlock(&engm->lock);
+           spin_unlock(&engm->lock);
             continue;
         }
 
@@ -5820,7 +5820,7 @@ KsDeliveryEngineThread(void * context)
         LASSERT(engs->queued);
         engs->emgr = NULL;
         engs->queued = FALSE;
-        cfs_spin_unlock(&engm->lock);
+       spin_unlock(&engm->lock);
 
         tconn = engs->tconn;
         LASSERT(tconn->kstc_magic == KS_TCONN_MAGIC);
@@ -5859,7 +5859,7 @@ ks_init_tdi_data()
     /* initialize tconn related globals */
     RtlZeroMemory(&ks_data, sizeof(ks_tdi_data_t));
 
-    cfs_spin_lock_init(&ks_data.ksnd_tconn_lock);
+       spin_lock_init(&ks_data.ksnd_tconn_lock);
     CFS_INIT_LIST_HEAD(&ks_data.ksnd_tconns);
     cfs_init_event(&ks_data.ksnd_tconn_exit, TRUE, FALSE);
 
@@ -5872,7 +5872,7 @@ ks_init_tdi_data()
     }
 
     /* initialize tsdu related globals */
-    cfs_spin_lock_init(&ks_data.ksnd_tsdu_lock);
+       spin_lock_init(&ks_data.ksnd_tsdu_lock);
     CFS_INIT_LIST_HEAD(&ks_data.ksnd_freetsdus);
     ks_data.ksnd_tsdu_size = TDINAL_TSDU_DEFAULT_SIZE; /* 64k */
     ks_data.ksnd_tsdu_slab = cfs_mem_cache_create(
@@ -5895,7 +5895,7 @@ ks_init_tdi_data()
         goto errorout;
     }
     for (i = 0; i < ks_data.ksnd_engine_nums; i++) {
-        cfs_spin_lock_init(&ks_data.ksnd_engine_mgr[i].lock);
+               spin_lock_init(&ks_data.ksnd_engine_mgr[i].lock);
         cfs_init_event(&ks_data.ksnd_engine_mgr[i].start, TRUE, FALSE);
         cfs_init_event(&ks_data.ksnd_engine_mgr[i].exit, TRUE, FALSE);
         CFS_INIT_LIST_HEAD(&ks_data.ksnd_engine_mgr[i].list);
@@ -5954,12 +5954,12 @@ ks_fini_tdi_data()
     }
 
     /* we need to wait until all the tconns are freed */
-    cfs_spin_lock(&(ks_data.ksnd_tconn_lock));
+       spin_lock(&(ks_data.ksnd_tconn_lock));
 
     if (cfs_list_empty(&(ks_data.ksnd_tconns))) {
         cfs_wake_event(&ks_data.ksnd_tconn_exit);
     }
-    cfs_spin_unlock(&(ks_data.ksnd_tconn_lock));
+       spin_unlock(&(ks_data.ksnd_tconn_lock));
 
     /* now wait on the tconn exit event */
     cfs_wait_event_internal(&ks_data.ksnd_tconn_exit, 0);
@@ -5969,7 +5969,7 @@ ks_fini_tdi_data()
     ks_data.ksnd_tconn_slab = NULL;
 
     /* clean up all the tsdu buffers in the free list */
-    cfs_spin_lock(&(ks_data.ksnd_tsdu_lock));
+       spin_lock(&(ks_data.ksnd_tsdu_lock));
     cfs_list_for_each (list, &ks_data.ksnd_freetsdus) {
         KsTsdu = cfs_list_entry (list, KS_TSDU, Link);
 
@@ -5977,7 +5977,7 @@ ks_fini_tdi_data()
                 ks_data.ksnd_tsdu_slab,
                 KsTsdu );
     }
-    cfs_spin_unlock(&(ks_data.ksnd_tsdu_lock));
+       spin_unlock(&(ks_data.ksnd_tsdu_lock));
 
     /* it's safe to delete the tsdu slab ... */
     cfs_mem_cache_destroy(ks_data.ksnd_tsdu_slab);
@@ -6101,22 +6101,22 @@ ks_replenish_backlogs(
         /* create the backlog child tconn */
         backlog = ks_create_child_tconn(parent);
 
-        cfs_spin_lock(&(parent->kstc_lock));
+       spin_lock(&(parent->kstc_lock));
 
         if (backlog) {
-            cfs_spin_lock(&backlog->kstc_lock);
+           spin_lock(&backlog->kstc_lock);
             /* attach it to the listening list of the daemon */
             cfs_list_add( &backlog->child.kstc_link,
                       &parent->listener.kstc_listening.list );
             parent->listener.kstc_listening.num++;
 
             backlog->child.kstc_queued = TRUE;
-            cfs_spin_unlock(&backlog->kstc_lock);
+           spin_unlock(&backlog->kstc_lock);
         } else {
             cfs_enter_debugger();
         }
 
-        cfs_spin_unlock(&(parent->kstc_lock));
+       spin_unlock(&(parent->kstc_lock));
     }
 }
 
@@ -6151,13 +6151,13 @@ ks_start_listen(ks_tconn_t *tconn, int nbacklog)
         return rc;
     }
 
-    cfs_spin_lock(&(tconn->kstc_lock));
-    tconn->listener.nbacklog = nbacklog;
-    tconn->kstc_state = ksts_listening;
-    cfs_set_flag(tconn->kstc_flags, KS_TCONN_DAEMON_STARTED);
-    cfs_spin_unlock(&(tconn->kstc_lock));
+       spin_lock(&(tconn->kstc_lock));
+       tconn->listener.nbacklog = nbacklog;
+       tconn->kstc_state = ksts_listening;
+       cfs_set_flag(tconn->kstc_flags, KS_TCONN_DAEMON_STARTED);
+       spin_unlock(&(tconn->kstc_lock));
 
-    return rc;
+       return rc;
 }
 
 void
@@ -6169,7 +6169,7 @@ ks_stop_listen(ks_tconn_t *tconn)
     /* reset all tdi event callbacks to NULL */
     KsResetHandlers (tconn);
 
-    cfs_spin_lock(&tconn->kstc_lock);
+       spin_lock(&tconn->kstc_lock);
 
     cfs_clear_flag(tconn->kstc_flags, KS_TCONN_DAEMON_STARTED);
 
@@ -6181,7 +6181,7 @@ ks_stop_listen(ks_tconn_t *tconn)
         ks_put_tconn(backlog);
     }
 
-    cfs_spin_unlock(&tconn->kstc_lock);
+       spin_unlock(&tconn->kstc_lock);
 
     /* wake it up from waiting on new incoming connections */
     KeSetEvent(&tconn->listener.kstc_accept_event, 0, FALSE);
@@ -6217,10 +6217,10 @@ ks_wait_child_tconn(
 
     ks_replenish_backlogs(parent, parent->listener.nbacklog);
 
-    cfs_spin_lock(&(parent->kstc_lock));
+       spin_lock(&(parent->kstc_lock));
 
-    if (parent->listener.kstc_listening.num <= 0) {
-        cfs_spin_unlock(&(parent->kstc_lock));
+       if (parent->listener.kstc_listening.num <= 0) {
+               spin_unlock(&(parent->kstc_lock));
         return -1;
     }
 
@@ -6231,7 +6231,7 @@ again:
     cfs_list_for_each(tmp, &(parent->listener.kstc_listening.list)) {
         backlog = cfs_list_entry (tmp, ks_tconn_t, child.kstc_link);
 
-        cfs_spin_lock(&(backlog->kstc_lock));
+       spin_lock(&(backlog->kstc_lock));
 
         if (backlog->child.kstc_accepted) {
 
@@ -6245,16 +6245,16 @@ again:
             parent->listener.kstc_listening.num--;
             backlog->child.kstc_queueno = 1;
 
-            cfs_spin_unlock(&(backlog->kstc_lock));
+           spin_unlock(&(backlog->kstc_lock));
 
             break;
         } else {
-            cfs_spin_unlock(&(backlog->kstc_lock));
+           spin_unlock(&(backlog->kstc_lock));
             backlog = NULL;
         }
     }
 
-    cfs_spin_unlock(&(parent->kstc_lock));
+       spin_unlock(&(parent->kstc_lock));
 
     /* we need to wait until new incoming connections are requested,
        or until the listening daemon thread is shut down */
@@ -6270,11 +6270,11 @@ again:
                 NULL
                 );
 
-        cfs_spin_lock(&(parent->kstc_lock));
+       spin_lock(&(parent->kstc_lock));
 
         /* check whether it is expected to exit */
         if (!cfs_is_flag_set(parent->kstc_flags, KS_TCONN_DAEMON_STARTED)) {
-            cfs_spin_unlock(&(parent->kstc_lock));
+           spin_unlock(&(parent->kstc_lock));
         } else {
             goto again;
         }
@@ -6524,7 +6524,7 @@ int libcfs_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask)
     ks_addr_slot_t * slot = NULL;
     PLIST_ENTRY      list = NULL;
 
-    cfs_spin_lock(&ks_data.ksnd_addrs_lock);
+       spin_lock(&ks_data.ksnd_addrs_lock);
 
     list = ks_data.ksnd_addrs_list.Flink;
     while (list != &ks_data.ksnd_addrs_list) {
@@ -6539,7 +6539,7 @@ int libcfs_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask)
         slot = NULL;
     }
 
-    cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
+       spin_unlock(&ks_data.ksnd_addrs_lock);
 
     return (int)(slot == NULL);
 }
@@ -6550,7 +6550,7 @@ int libcfs_ipif_enumerate(char ***names)
     PLIST_ENTRY      list = NULL;
     int              nips = 0;
 
-    cfs_spin_lock(&ks_data.ksnd_addrs_lock);
+       spin_lock(&ks_data.ksnd_addrs_lock);
 
     *names = cfs_alloc(sizeof(char *) * ks_data.ksnd_naddrs, CFS_ALLOC_ZERO);
     if (*names == NULL) {
@@ -6569,7 +6569,7 @@ int libcfs_ipif_enumerate(char ***names)
 
 errorout:
 
-    cfs_spin_unlock(&ks_data.ksnd_addrs_lock);
+       spin_unlock(&ks_data.ksnd_addrs_lock);
     return nips;
 }
 
@@ -6626,7 +6626,7 @@ void libcfs_sock_abort_accept(struct socket *sock)
 {
     LASSERT(sock->kstc_type == kstt_listener);
 
-    cfs_spin_lock(&(sock->kstc_lock));
+       spin_lock(&(sock->kstc_lock));
 
     /* clear the daemon flag */
     cfs_clear_flag(sock->kstc_flags, KS_TCONN_DAEMON_STARTED);
@@ -6634,7 +6634,7 @@ void libcfs_sock_abort_accept(struct socket *sock)
     /* wake it up from waiting on new incoming connections */
     KeSetEvent(&sock->listener.kstc_accept_event, 0, FALSE);
 
-    cfs_spin_unlock(&(sock->kstc_lock));
+       spin_unlock(&(sock->kstc_lock));
 }
 
 /*
@@ -6718,7 +6718,7 @@ int libcfs_sock_getaddr(struct socket *socket, int remote, __u32 *ip, int *port)
 {
     PTRANSPORT_ADDRESS  taddr = NULL;
 
-    cfs_spin_lock(&socket->kstc_lock);
+       spin_lock(&socket->kstc_lock);
     if (remote) {
         if (socket->kstc_type == kstt_sender) {
             taddr = socket->sender.kstc_info.Remote;
@@ -6736,12 +6736,12 @@ int libcfs_sock_getaddr(struct socket *socket, int remote, __u32 *ip, int *port)
         if (port != NULL)
             *port = ntohs (addr->sin_port);
     } else {
-        cfs_spin_unlock(&socket->kstc_lock);
-        return -ENOTCONN;
-    }
+               spin_unlock(&socket->kstc_lock);
+               return -ENOTCONN;
+       }
 
-    cfs_spin_unlock(&socket->kstc_lock);
-    return 0;
+       spin_unlock(&socket->kstc_lock);
+       return 0;
 }
 
 int libcfs_sock_write(struct socket *sock, void *buffer, int nob, int timeout)
index 168c973..54c8783 100644 (file)
@@ -46,7 +46,7 @@ static unsigned int pages_factor[CFS_TCD_TYPE_MAX] = {
 
 char *cfs_trace_console_buffers[CFS_NR_CPUS][CFS_TCD_TYPE_MAX];
 
-cfs_rw_semaphore_t cfs_tracefile_sem;
+struct rw_semaphore cfs_tracefile_sem;
 
 int cfs_tracefile_init_arch()
 {
@@ -54,7 +54,7 @@ int cfs_tracefile_init_arch()
        int    j;
        struct cfs_trace_cpu_data *tcd;
 
-       cfs_init_rwsem(&cfs_tracefile_sem);
+       init_rwsem(&cfs_tracefile_sem);
 
        /* initialize trace_data */
        memset(cfs_trace_data, 0, sizeof(cfs_trace_data));
@@ -111,27 +111,27 @@ void cfs_tracefile_fini_arch()
                cfs_trace_data[i] = NULL;
        }
 
-       cfs_fini_rwsem(&cfs_tracefile_sem);
+       fini_rwsem(&cfs_tracefile_sem);
 }
 
 void cfs_tracefile_read_lock()
 {
-       cfs_down_read(&cfs_tracefile_sem);
+       down_read(&cfs_tracefile_sem);
 }
 
 void cfs_tracefile_read_unlock()
 {
-       cfs_up_read(&cfs_tracefile_sem);
+       up_read(&cfs_tracefile_sem);
 }
 
 void cfs_tracefile_write_lock()
 {
-       cfs_down_write(&cfs_tracefile_sem);
+       down_write(&cfs_tracefile_sem);
 }
 
 void cfs_tracefile_write_unlock()
 {
-       cfs_up_write(&cfs_tracefile_sem);
+       up_write(&cfs_tracefile_sem);
 }
 
 cfs_trace_buf_type_t cfs_trace_buf_idx_get()
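
Callers of the wrappers above are untouched by the patch; they still use the cfs_tracefile_*_lock() names, which now map one-to-one onto the kernel rwsem calls. A hypothetical caller, for illustration:

	static void trace_state_dump(void)
	{
		cfs_tracefile_read_lock();	/* -> down_read(&cfs_tracefile_sem) */
		/* ... read-only walk of the trace configuration ... */
		cfs_tracefile_read_unlock();	/* -> up_read(&cfs_tracefile_sem) */
	}

	static void trace_state_update(void)
	{
		cfs_tracefile_write_lock();	/* -> down_write(&cfs_tracefile_sem) */
		/* ... modify the trace configuration ... */
		cfs_tracefile_write_unlock();	/* -> up_write(&cfs_tracefile_sem) */
	}
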
index 3ae1f45..811af88 100644 (file)
@@ -49,7 +49,7 @@ typedef struct cfs_wi_sched {
        cfs_list_t              ws_list;        /* chain on global list */
 #ifdef __KERNEL__
        /** serialised workitems */
-       cfs_spinlock_t          ws_lock;
+       spinlock_t              ws_lock;
        /** where schedulers sleep */
        cfs_waitq_t             ws_waitq;
 #endif
@@ -79,7 +79,7 @@ typedef struct cfs_wi_sched {
 
 struct cfs_workitem_data {
        /** serialize */
-       cfs_spinlock_t          wi_glock;
+       spinlock_t              wi_glock;
        /** list of all schedulers */
        cfs_list_t              wi_scheds;
        /** WI module is initialized */
@@ -92,13 +92,13 @@ struct cfs_workitem_data {
 static inline void
 cfs_wi_sched_lock(cfs_wi_sched_t *sched)
 {
-        cfs_spin_lock(&sched->ws_lock);
+       spin_lock(&sched->ws_lock);
 }
 
 static inline void
 cfs_wi_sched_unlock(cfs_wi_sched_t *sched)
 {
-        cfs_spin_unlock(&sched->ws_lock);
+       spin_unlock(&sched->ws_lock);
 }
 
 static inline int
@@ -123,13 +123,13 @@ cfs_wi_sched_cansleep(cfs_wi_sched_t *sched)
 static inline void
 cfs_wi_sched_lock(cfs_wi_sched_t *sched)
 {
-        cfs_spin_lock(&cfs_wi_data.wi_glock);
+       spin_lock(&cfs_wi_data.wi_glock);
 }
 
 static inline void
 cfs_wi_sched_unlock(cfs_wi_sched_t *sched)
 {
-        cfs_spin_unlock(&cfs_wi_data.wi_glock);
+       spin_unlock(&cfs_wi_data.wi_glock);
 }
 
 #endif /* __KERNEL__ */
@@ -262,13 +262,13 @@ cfs_wi_scheduler (void *arg)
        if (sched->ws_cptab != NULL)
                cfs_cpt_bind(sched->ws_cptab, sched->ws_cpt);
 
-       cfs_spin_lock(&cfs_wi_data.wi_glock);
+       spin_lock(&cfs_wi_data.wi_glock);
 
        LASSERT(sched->ws_starting == 1);
        sched->ws_starting--;
        sched->ws_nthreads++;
 
-       cfs_spin_unlock(&cfs_wi_data.wi_glock);
+       spin_unlock(&cfs_wi_data.wi_glock);
 
        cfs_wi_sched_lock(sched);
 
@@ -328,11 +328,11 @@ cfs_wi_scheduler (void *arg)
 
         cfs_wi_sched_unlock(sched);
 
-       cfs_spin_lock(&cfs_wi_data.wi_glock);
+       spin_lock(&cfs_wi_data.wi_glock);
        sched->ws_nthreads--;
-       cfs_spin_unlock(&cfs_wi_data.wi_glock);
+       spin_unlock(&cfs_wi_data.wi_glock);
 
-        return 0;
+       return 0;
 }
 
 #else /* __KERNEL__ */
@@ -340,12 +340,12 @@ cfs_wi_scheduler (void *arg)
 int
 cfs_wi_check_events (void)
 {
-        int               n = 0;
-        cfs_workitem_t   *wi;
+       int               n = 0;
+       cfs_workitem_t   *wi;
 
-        cfs_spin_lock(&cfs_wi_data.wi_glock);
+       spin_lock(&cfs_wi_data.wi_glock);
 
-        for (;;) {
+       for (;;) {
                struct cfs_wi_sched     *sched = NULL;
                struct cfs_wi_sched     *tmp;
 
@@ -368,18 +368,18 @@ cfs_wi_check_events (void)
                LASSERT(sched->ws_nscheduled > 0);
                sched->ws_nscheduled--;
 
-                LASSERT (wi->wi_scheduled);
-                wi->wi_scheduled = 0;
-                cfs_spin_unlock(&cfs_wi_data.wi_glock);
+               LASSERT(wi->wi_scheduled);
+               wi->wi_scheduled = 0;
+               spin_unlock(&cfs_wi_data.wi_glock);
 
-                n++;
-                (*wi->wi_action) (wi);
+               n++;
+               (*wi->wi_action) (wi);
 
-                cfs_spin_lock(&cfs_wi_data.wi_glock);
-        }
+               spin_lock(&cfs_wi_data.wi_glock);
+       }
 
-        cfs_spin_unlock(&cfs_wi_data.wi_glock);
-        return n;
+       spin_unlock(&cfs_wi_data.wi_glock);
+       return n;
 }
 
 #endif
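
cfs_wi_check_events() above also shows the discipline this file applies everywhere: wi_glock protects the queues, but it is dropped across the (*wi_action)() callback so the work item may block or re-queue itself. The same loop, reduced to a self-contained sketch with a hypothetical queue:

	#include <linux/spinlock.h>
	#include <linux/list.h>

	struct work {
		struct list_head link;
		void (*action)(struct work *);
	};

	static DEFINE_SPINLOCK(q_lock);
	static LIST_HEAD(q);

	static int run_queued(void)
	{
		int n = 0;

		spin_lock(&q_lock);
		while (!list_empty(&q)) {
			struct work *w = list_first_entry(&q, struct work, link);

			list_del(&w->link);
			spin_unlock(&q_lock);	/* never call out with the lock held */

			w->action(w);
			n++;

			spin_lock(&q_lock);
		}
		spin_unlock(&q_lock);
		return n;
	}
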
@@ -392,37 +392,37 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
        LASSERT(cfs_wi_data.wi_init);
        LASSERT(!cfs_wi_data.wi_stopping);
 
-       cfs_spin_lock(&cfs_wi_data.wi_glock);
+       spin_lock(&cfs_wi_data.wi_glock);
        if (sched->ws_stopping) {
                CDEBUG(D_INFO, "%s is in progress of stopping\n",
                       sched->ws_name);
-               cfs_spin_unlock(&cfs_wi_data.wi_glock);
+               spin_unlock(&cfs_wi_data.wi_glock);
                return;
        }
 
        LASSERT(!cfs_list_empty(&sched->ws_list));
        sched->ws_stopping = 1;
 
-       cfs_spin_unlock(&cfs_wi_data.wi_glock);
+       spin_unlock(&cfs_wi_data.wi_glock);
 
        i = 2;
 #ifdef __KERNEL__
        cfs_waitq_broadcast(&sched->ws_waitq);
 
-       cfs_spin_lock(&cfs_wi_data.wi_glock);
+       spin_lock(&cfs_wi_data.wi_glock);
        while (sched->ws_nthreads > 0) {
                CDEBUG(IS_PO2(++i) ? D_WARNING : D_NET,
                       "waiting for %d threads of WI sched[%s] to terminate\n",
                       sched->ws_nthreads, sched->ws_name);
 
-               cfs_spin_unlock(&cfs_wi_data.wi_glock);
+               spin_unlock(&cfs_wi_data.wi_glock);
                cfs_pause(cfs_time_seconds(1) / 20);
-               cfs_spin_lock(&cfs_wi_data.wi_glock);
+               spin_lock(&cfs_wi_data.wi_glock);
        }
 
        cfs_list_del(&sched->ws_list);
 
-       cfs_spin_unlock(&cfs_wi_data.wi_glock);
+       spin_unlock(&cfs_wi_data.wi_glock);
 #else
        SET_BUT_UNUSED(i);
 #endif
@@ -453,7 +453,7 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
        sched->ws_cpt = cpt;
 
 #ifdef __KERNEL__
-       cfs_spin_lock_init(&sched->ws_lock);
+       spin_lock_init(&sched->ws_lock);
        cfs_waitq_init(&sched->ws_waitq);
 #endif
        CFS_INIT_LIST_HEAD(&sched->ws_runq);
@@ -463,15 +463,15 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
        rc = 0;
 #ifdef __KERNEL__
        while (nthrs > 0)  {
-               cfs_spin_lock(&cfs_wi_data.wi_glock);
+               spin_lock(&cfs_wi_data.wi_glock);
                while (sched->ws_starting > 0) {
-                       cfs_spin_unlock(&cfs_wi_data.wi_glock);
+                       spin_unlock(&cfs_wi_data.wi_glock);
                        cfs_schedule();
-                       cfs_spin_lock(&cfs_wi_data.wi_glock);
+                       spin_lock(&cfs_wi_data.wi_glock);
                }
 
                sched->ws_starting++;
-               cfs_spin_unlock(&cfs_wi_data.wi_glock);
+               spin_unlock(&cfs_wi_data.wi_glock);
 
                rc = cfs_create_thread(cfs_wi_scheduler, sched, 0);
                if (rc >= 0) {
@@ -482,13 +482,13 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
                CERROR("Failed to create thread for WI scheduler %s: %d\n",
                       name, rc);
 
-               cfs_spin_lock(&cfs_wi_data.wi_glock);
+               spin_lock(&cfs_wi_data.wi_glock);
 
                /* make up for cfs_wi_sched_destroy */
                cfs_list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
                sched->ws_starting--;
 
-               cfs_spin_unlock(&cfs_wi_data.wi_glock);
+               spin_unlock(&cfs_wi_data.wi_glock);
 
                cfs_wi_sched_destroy(sched);
                return rc;
@@ -496,9 +496,9 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
 #else
        SET_BUT_UNUSED(rc);
 #endif
-       cfs_spin_lock(&cfs_wi_data.wi_glock);
+       spin_lock(&cfs_wi_data.wi_glock);
        cfs_list_add(&sched->ws_list, &cfs_wi_data.wi_scheds);
-       cfs_spin_unlock(&cfs_wi_data.wi_glock);
+       spin_unlock(&cfs_wi_data.wi_glock);
 
        *sched_pp = sched;
        return 0;
@@ -510,7 +510,7 @@ cfs_wi_startup(void)
 {
        memset(&cfs_wi_data, 0, sizeof(cfs_wi_data));
 
-       cfs_spin_lock_init(&cfs_wi_data.wi_glock);
+       spin_lock_init(&cfs_wi_data.wi_glock);
        CFS_INIT_LIST_HEAD(&cfs_wi_data.wi_scheds);
        cfs_wi_data.wi_init = 1;
 
@@ -522,9 +522,9 @@ cfs_wi_shutdown (void)
 {
        struct cfs_wi_sched     *sched;
 
-       cfs_spin_lock(&cfs_wi_data.wi_glock);
+       spin_lock(&cfs_wi_data.wi_glock);
        cfs_wi_data.wi_stopping = 1;
-       cfs_spin_unlock(&cfs_wi_data.wi_glock);
+       spin_unlock(&cfs_wi_data.wi_glock);
 
 #ifdef __KERNEL__
        /* nobody should contend on this list */
@@ -534,14 +534,14 @@ cfs_wi_shutdown (void)
        }
 
        cfs_list_for_each_entry(sched, &cfs_wi_data.wi_scheds, ws_list) {
-               cfs_spin_lock(&cfs_wi_data.wi_glock);
+               spin_lock(&cfs_wi_data.wi_glock);
 
                while (sched->ws_nthreads != 0) {
-                       cfs_spin_unlock(&cfs_wi_data.wi_glock);
+                       spin_unlock(&cfs_wi_data.wi_glock);
                        cfs_pause(cfs_time_seconds(1) / 20);
-                       cfs_spin_lock(&cfs_wi_data.wi_glock);
+                       spin_lock(&cfs_wi_data.wi_glock);
                }
-               cfs_spin_unlock(&cfs_wi_data.wi_glock);
+               spin_unlock(&cfs_wi_data.wi_glock);
        }
 #endif
        while (!cfs_list_empty(&cfs_wi_data.wi_scheds)) {
index 90e6b32..259fbaf 100644 (file)
@@ -175,14 +175,14 @@ lnet_net_lock_current(void)
 
 #ifdef __KERNEL__
 
-#define lnet_ptl_lock(ptl)     cfs_spin_lock(&(ptl)->ptl_lock)
-#define lnet_ptl_unlock(ptl)   cfs_spin_unlock(&(ptl)->ptl_lock)
-#define lnet_eq_wait_lock()    cfs_spin_lock(&the_lnet.ln_eq_wait_lock)
-#define lnet_eq_wait_unlock()  cfs_spin_unlock(&the_lnet.ln_eq_wait_lock)
-#define lnet_ni_lock(ni)       cfs_spin_lock(&(ni)->ni_lock)
-#define lnet_ni_unlock(ni)     cfs_spin_unlock(&(ni)->ni_lock)
-#define LNET_MUTEX_LOCK(m)     cfs_mutex_lock(m)
-#define LNET_MUTEX_UNLOCK(m)   cfs_mutex_unlock(m)
+#define lnet_ptl_lock(ptl)     spin_lock(&(ptl)->ptl_lock)
+#define lnet_ptl_unlock(ptl)   spin_unlock(&(ptl)->ptl_lock)
+#define lnet_eq_wait_lock()    spin_lock(&the_lnet.ln_eq_wait_lock)
+#define lnet_eq_wait_unlock()  spin_unlock(&the_lnet.ln_eq_wait_lock)
+#define lnet_ni_lock(ni)       spin_lock(&(ni)->ni_lock)
+#define lnet_ni_unlock(ni)     spin_unlock(&(ni)->ni_lock)
+#define LNET_MUTEX_LOCK(m)     mutex_lock(m)
+#define LNET_MUTEX_UNLOCK(m)   mutex_unlock(m)
 
 #else /* !__KERNEL__ */
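
With the cfs_ prefix gone, the kernel-side helpers above expand directly to the kernel primitives, so a call site reads the same whether it uses the macro or the raw API. A hypothetical example; ni_state is an invented field, used only for illustration:

	static void lnet_ni_set_state(lnet_ni_t *ni, int state)
	{
		lnet_ni_lock(ni);		/* spin_lock(&ni->ni_lock) */
		ni->ni_state = state;		/* invented field */
		lnet_ni_unlock(ni);		/* spin_unlock(&ni->ni_lock) */
	}
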
 
index 4d3d614..24e3c1a 100644 (file)
@@ -412,7 +412,7 @@ struct lnet_tx_queue {
 
 typedef struct lnet_ni {
 #ifdef __KERNEL__
-       cfs_spinlock_t          ni_lock;
+       spinlock_t              ni_lock;
 #else
 # ifndef HAVE_LIBPTHREAD
        int                     ni_lock;
@@ -638,7 +638,7 @@ struct lnet_match_table {
 
 typedef struct lnet_portal {
 #ifdef __KERNEL__
-       cfs_spinlock_t          ptl_lock;
+       spinlock_t              ptl_lock;
 #else
 # ifndef HAVE_LIBPTHREAD
        int                     ptl_lock;
@@ -721,7 +721,7 @@ typedef struct
        struct lnet_res_container       ln_eq_container;
 #ifdef __KERNEL__
        cfs_waitq_t                     ln_eq_waitq;
-       cfs_spinlock_t                  ln_eq_wait_lock;
+       spinlock_t                      ln_eq_wait_lock;
 #else
 # ifndef HAVE_LIBPTHREAD
        int                             ln_eq_wait_lock;
@@ -773,10 +773,10 @@ typedef struct
        cfs_list_t                      ln_rcd_zombie;
 #ifdef __KERNEL__
        /* serialise startup/shutdown */
-       cfs_semaphore_t                 ln_rc_signal;
+       struct semaphore                ln_rc_signal;
 
-       cfs_mutex_t                     ln_api_mutex;
-       cfs_mutex_t                     ln_lnd_mutex;
+       struct mutex                    ln_api_mutex;
+       struct mutex                    ln_lnd_mutex;
 #else
 # ifndef HAVE_LIBPTHREAD
        int                             ln_api_mutex;
index 7c0a8d1..028945a 100644 (file)
@@ -63,9 +63,9 @@ mxlnd_free_pages(kmx_pages_t *p)
         for (i = 0; i < npages; i++) {
                 if (p->mxg_pages[i] != NULL) {
                         __free_page(p->mxg_pages[i]);
-                        cfs_spin_lock(&kmxlnd_data.kmx_mem_lock);
-                        kmxlnd_data.kmx_mem_used -= PAGE_SIZE;
-                        cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock);
+                       spin_lock(&kmxlnd_data.kmx_mem_lock);
+                       kmxlnd_data.kmx_mem_used -= PAGE_SIZE;
+                       spin_unlock(&kmxlnd_data.kmx_mem_lock);
                 }
         }
 
@@ -96,9 +96,9 @@ mxlnd_alloc_pages(kmx_pages_t **pp, int npages)
                         mxlnd_free_pages(p);
                         return -ENOMEM;
                 }
-                cfs_spin_lock(&kmxlnd_data.kmx_mem_lock);
-                kmxlnd_data.kmx_mem_used += PAGE_SIZE;
-                cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock);
+               spin_lock(&kmxlnd_data.kmx_mem_lock);
+               kmxlnd_data.kmx_mem_used += PAGE_SIZE;
+               spin_unlock(&kmxlnd_data.kmx_mem_lock);
         }
 
         *pp = p;
@@ -393,7 +393,7 @@ mxlnd_thread_start(int (*fn)(void *arg), void *arg)
         int     i   = (int) ((long) arg);
 
         cfs_atomic_inc(&kmxlnd_data.kmx_nthreads);
-        cfs_init_completion(&kmxlnd_data.kmx_completions[i]);
+       init_completion(&kmxlnd_data.kmx_completions[i]);
 
         pid = cfs_create_thread(fn, arg, 0);
         if (pid < 0) {
@@ -414,7 +414,7 @@ mxlnd_thread_stop(long id)
 {
         int     i       = (int) id;
         cfs_atomic_dec (&kmxlnd_data.kmx_nthreads);
-        cfs_complete(&kmxlnd_data.kmx_completions[i]);
+       complete(&kmxlnd_data.kmx_completions[i]);
 }
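
The start/stop pair above is the standard completion lifecycle once the wrappers are removed: init_completion() before the thread runs, complete() from the exiting thread, wait_for_completion() in the shutdown path. Reduced to a sketch:

	#include <linux/completion.h>

	static struct completion thr_done;

	static int thr_main(void *arg)
	{
		/* ... service loop until shutdown is requested ... */
		complete(&thr_done);		/* was cfs_complete() */
		return 0;
	}

	static void thr_setup(void)
	{
		init_completion(&thr_done);	/* was cfs_init_completion() */
		/* ... create the thread running thr_main() ... */
	}

	static void thr_shutdown(void)
	{
		/* ... ask the thread to stop ... */
		wait_for_completion(&thr_done);	/* was cfs_wait_for_completion() */
	}
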
 
 /**
@@ -451,8 +451,8 @@ mxlnd_shutdown (lnet_ni_t *ni)
 
                 /* wakeup request_waitds */
                 mx_wakeup(kmxlnd_data.kmx_endpt);
-                cfs_up(&kmxlnd_data.kmx_tx_queue_sem);
-                cfs_up(&kmxlnd_data.kmx_conn_sem);
+               up(&kmxlnd_data.kmx_tx_queue_sem);
+               up(&kmxlnd_data.kmx_conn_sem);
                 mxlnd_sleep(2 * CFS_HZ);
 
                 /* fall through */
@@ -462,13 +462,13 @@ mxlnd_shutdown (lnet_ni_t *ni)
                 CDEBUG(D_NET, "waiting on threads\n");
                 /* wait for threads to complete */
                 for (i = 0; i < nthreads; i++) {
-                        cfs_wait_for_completion(&kmxlnd_data.kmx_completions[i]);
+                       wait_for_completion(&kmxlnd_data.kmx_completions[i]);
                 }
                 LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
 
                 CDEBUG(D_NET, "freeing completions\n");
                 MXLND_FREE(kmxlnd_data.kmx_completions,
-                            nthreads * sizeof(cfs_completion_t));
+                           nthreads * sizeof(struct completion));
 
                 /* fall through */
 
@@ -558,25 +558,25 @@ mxlnd_startup (lnet_ni_t *ni)
         kmxlnd_data.kmx_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
         CDEBUG(D_NET, "my incarnation is %llu\n", kmxlnd_data.kmx_incarnation);
 
-        cfs_rwlock_init (&kmxlnd_data.kmx_global_lock);
-        cfs_spin_lock_init (&kmxlnd_data.kmx_mem_lock);
+       rwlock_init (&kmxlnd_data.kmx_global_lock);
+       spin_lock_init (&kmxlnd_data.kmx_mem_lock);
 
         CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_conn_reqs);
         CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_conn_zombies);
         CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_orphan_msgs);
-        cfs_spin_lock_init (&kmxlnd_data.kmx_conn_lock);
-        cfs_sema_init(&kmxlnd_data.kmx_conn_sem, 0);
+       spin_lock_init (&kmxlnd_data.kmx_conn_lock);
+       sema_init(&kmxlnd_data.kmx_conn_sem, 0);
 
         for (i = 0; i < MXLND_HASH_SIZE; i++) {
                 CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_peers[i]);
         }
 
         CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_tx_idle);
-        cfs_spin_lock_init (&kmxlnd_data.kmx_tx_idle_lock);
-        kmxlnd_data.kmx_tx_next_cookie = 1;
-        CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_tx_queue);
-        cfs_spin_lock_init (&kmxlnd_data.kmx_tx_queue_lock);
-        cfs_sema_init(&kmxlnd_data.kmx_tx_queue_sem, 0);
+       spin_lock_init (&kmxlnd_data.kmx_tx_idle_lock);
+       kmxlnd_data.kmx_tx_next_cookie = 1;
+       CFS_INIT_LIST_HEAD (&kmxlnd_data.kmx_tx_queue);
+       spin_lock_init (&kmxlnd_data.kmx_tx_queue_lock);
+       sema_init(&kmxlnd_data.kmx_tx_queue_sem, 0);
 
         kmxlnd_data.kmx_init = MXLND_INIT_DATA;
         /*****************************************************/
@@ -601,13 +601,13 @@ mxlnd_startup (lnet_ni_t *ni)
         /* start threads */
 
         MXLND_ALLOC(kmxlnd_data.kmx_completions,
-                     nthreads * sizeof(cfs_completion_t));
+                    nthreads * sizeof(struct completion));
         if (kmxlnd_data.kmx_completions == NULL) {
                 CERROR("failed to alloc kmxlnd_data.kmx_completions\n");
                 goto failed;
         }
         memset(kmxlnd_data.kmx_completions, 0,
-               nthreads * sizeof(cfs_completion_t));
+              nthreads * sizeof(struct completion));
 
         CDEBUG(D_NET, "using %d %s in mx_wait_any()\n",
                 *kmxlnd_tunables.kmx_n_waitd,
@@ -620,11 +620,11 @@ mxlnd_startup (lnet_ni_t *ni)
                         cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
                         mx_wakeup(kmxlnd_data.kmx_endpt);
                         for (--i; i >= 0; i--) {
-                                cfs_wait_for_completion(&kmxlnd_data.kmx_completions[i]);
+                               wait_for_completion(&kmxlnd_data.kmx_completions[i]);
                         }
                         LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
                         MXLND_FREE(kmxlnd_data.kmx_completions,
-                                nthreads * sizeof(cfs_completion_t));
+                               nthreads * sizeof(struct completion));
 
                         goto failed;
                 }
@@ -635,11 +635,11 @@ mxlnd_startup (lnet_ni_t *ni)
                 cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
                 mx_wakeup(kmxlnd_data.kmx_endpt);
                 for (--i; i >= 0; i--) {
-                        cfs_wait_for_completion(&kmxlnd_data.kmx_completions[i]);
+                       wait_for_completion(&kmxlnd_data.kmx_completions[i]);
                 }
                 LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
                 MXLND_FREE(kmxlnd_data.kmx_completions,
-                        nthreads * sizeof(cfs_completion_t));
+                       nthreads * sizeof(struct completion));
                 goto failed;
         }
         ret = mxlnd_thread_start(mxlnd_timeoutd, (void*)((long)i++));
@@ -647,13 +647,13 @@ mxlnd_startup (lnet_ni_t *ni)
                 CERROR("Starting mxlnd_timeoutd failed with %d\n", ret);
                 cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
                 mx_wakeup(kmxlnd_data.kmx_endpt);
-                cfs_up(&kmxlnd_data.kmx_tx_queue_sem);
+               up(&kmxlnd_data.kmx_tx_queue_sem);
                 for (--i; i >= 0; i--) {
-                        cfs_wait_for_completion(&kmxlnd_data.kmx_completions[i]);
+                       wait_for_completion(&kmxlnd_data.kmx_completions[i]);
                 }
                 LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
                 MXLND_FREE(kmxlnd_data.kmx_completions,
-                        nthreads * sizeof(cfs_completion_t));
+                       nthreads * sizeof(struct completion));
                 goto failed;
         }
         ret = mxlnd_thread_start(mxlnd_connd, (void*)((long)i++));
@@ -661,13 +661,13 @@ mxlnd_startup (lnet_ni_t *ni)
                 CERROR("Starting mxlnd_connd failed with %d\n", ret);
                 cfs_atomic_set(&kmxlnd_data.kmx_shutdown, 1);
                 mx_wakeup(kmxlnd_data.kmx_endpt);
-                cfs_up(&kmxlnd_data.kmx_tx_queue_sem);
+               up(&kmxlnd_data.kmx_tx_queue_sem);
                 for (--i; i >= 0; i--) {
-                        cfs_wait_for_completion(&kmxlnd_data.kmx_completions[i]);
+                       wait_for_completion(&kmxlnd_data.kmx_completions[i]);
                 }
                 LASSERT(cfs_atomic_read(&kmxlnd_data.kmx_nthreads) == 0);
                 MXLND_FREE(kmxlnd_data.kmx_completions,
-                        nthreads * sizeof(cfs_completion_t));
+                       nthreads * sizeof(struct completion));
                 goto failed;
         }
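
kmx_tx_queue_sem and kmx_conn_sem above follow the producer/consumer semaphore idiom, now spelled with the kernel API. A self-contained sketch of that idiom:

	#include <linux/semaphore.h>

	static struct semaphore todo_sem;

	static void todo_init(void)
	{
		sema_init(&todo_sem, 0);	/* was cfs_sema_init(): start empty */
	}

	static void todo_post(void)
	{
		/* ... enqueue an item ... */
		up(&todo_sem);			/* was cfs_up(): wake one consumer */
	}

	static int todo_wait(void)
	{
		/* blocks until an item is posted; <0 if interrupted by a signal */
		return down_interruptible(&todo_sem);
	}
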
 
index fdb0942..57b3146 100644 (file)
 /* provide wrappers around LIBCFS_ALLOC/FREE to keep MXLND specific
  * memory usage stats that include pages */
 
-#define MXLND_ALLOC(x, size) \
-        do { \
-                cfs_spin_lock(&kmxlnd_data.kmx_mem_lock); \
-                kmxlnd_data.kmx_mem_used += size; \
-                cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock); \
-                LIBCFS_ALLOC(x, size); \
-                if (unlikely(x == NULL)) { \
-                        cfs_spin_lock(&kmxlnd_data.kmx_mem_lock); \
-                        kmxlnd_data.kmx_mem_used -= size; \
-                        cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock); \
-                } \
-        } while (0)
-
-#define MXLND_FREE(x, size) \
-        do { \
-                cfs_spin_lock(&kmxlnd_data.kmx_mem_lock); \
-                kmxlnd_data.kmx_mem_used -= size; \
-                cfs_spin_unlock(&kmxlnd_data.kmx_mem_lock); \
-                LIBCFS_FREE(x, size); \
-        } while (0)
+#define MXLND_ALLOC(x, size)                                   \
+       do {                                                    \
+               spin_lock(&kmxlnd_data.kmx_mem_lock);           \
+               kmxlnd_data.kmx_mem_used += size;               \
+               spin_unlock(&kmxlnd_data.kmx_mem_lock);         \
+               LIBCFS_ALLOC(x, size);                          \
+               if (unlikely(x == NULL)) {                      \
+                       spin_lock(&kmxlnd_data.kmx_mem_lock);   \
+                       kmxlnd_data.kmx_mem_used -= size;       \
+                       spin_unlock(&kmxlnd_data.kmx_mem_lock); \
+               }                                               \
+       } while (0)
+
+#define MXLND_FREE(x, size)                                    \
+       do {                                                    \
+               spin_lock(&kmxlnd_data.kmx_mem_lock);           \
+               kmxlnd_data.kmx_mem_used -= size;               \
+               spin_unlock(&kmxlnd_data.kmx_mem_lock);         \
+               LIBCFS_FREE(x, size);                           \
+       } while (0)
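
The re-indented macros keep the do { ... } while (0) wrapper, which is what lets them sit safely in an unbraced if/else as a single statement. A hypothetical caller:

	static void *grab(size_t size)
	{
		void *buf;

		if (size != 0)
			MXLND_ALLOC(buf, size);	/* safe: expands to one statement */
		else
			buf = NULL;
		return buf;			/* NULL if size == 0 or the alloc failed */
	}
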
 
 
 typedef struct kmx_tunables
@@ -220,19 +220,19 @@ typedef struct kmx_data
         int                 kmx_init;           /* initialization state */
         cfs_atomic_t        kmx_shutdown;       /* shutting down? */
         cfs_atomic_t        kmx_nthreads;       /* number of threads */
-        cfs_completion_t   *kmx_completions;   /* array of completion structs */
-        lnet_ni_t          *kmx_ni;             /* the LND instance */
-        u64                 kmx_incarnation;    /* my incarnation value */
-        long                kmx_mem_used;       /* memory used */
-        mx_endpoint_t       kmx_endpt;          /* the MX endpoint */
-        mx_endpoint_addr_t  kmx_epa;            /* the MX endpoint address */
-
-        cfs_rwlock_t        kmx_global_lock;    /* global lock */
-        cfs_spinlock_t      kmx_mem_lock;       /* memory accounting lock */
-
-        cfs_list_t          kmx_conn_reqs;     /* list of connection requests */
-        cfs_spinlock_t      kmx_conn_lock;      /* connection list lock */
-        cfs_semaphore_t     kmx_conn_sem;       /* semaphore for connection request list */
+       struct completion   *kmx_completions;   /* array of completion struct */
+       lnet_ni_t           *kmx_ni;            /* the LND instance */
+       u64                 kmx_incarnation;    /* my incarnation value */
+       long                kmx_mem_used;       /* memory used */
+       mx_endpoint_t       kmx_endpt;          /* the MX endpoint */
+       mx_endpoint_addr_t  kmx_epa;            /* the MX endpoint address */
+
+       rwlock_t            kmx_global_lock;    /* global lock */
+       spinlock_t          kmx_mem_lock;       /* memory accounting lock */
+
+       cfs_list_t          kmx_conn_reqs;      /* list of connection reqs */
+       spinlock_t          kmx_conn_lock;      /* connection list lock */
+       struct semaphore    kmx_conn_sem;       /* sem for conn req list */
         cfs_list_t          kmx_conn_zombies;   /* list of zombie connections */
         cfs_list_t          kmx_orphan_msgs;    /* list of txs to cancel */
 
@@ -244,12 +244,12 @@ typedef struct kmx_data
 
         struct kmx_ctx     *kmx_txs;            /* all tx descriptors */
         cfs_list_t          kmx_tx_idle;        /* list of idle tx */
-        cfs_spinlock_t      kmx_tx_idle_lock;   /* lock for idle tx list */
-        s32                 kmx_tx_used;        /* txs in use */
-        u64                 kmx_tx_next_cookie; /* unique id for tx */
-        cfs_list_t          kmx_tx_queue;       /* generic send queue */
-        cfs_spinlock_t      kmx_tx_queue_lock;  /* lock for generic sends */
-        cfs_semaphore_t     kmx_tx_queue_sem;   /* semaphore for tx queue */
+       spinlock_t          kmx_tx_idle_lock;   /* lock for idle tx list */
+       s32                 kmx_tx_used;        /* txs in use */
+       u64                 kmx_tx_next_cookie; /* unique id for tx */
+       cfs_list_t          kmx_tx_queue;       /* generic send queue */
+       spinlock_t          kmx_tx_queue_lock;  /* lock for generic sends */
+       struct semaphore    kmx_tx_queue_sem;   /* semaphore for tx queue */
 } kmx_data_t;
 
 #define MXLND_INIT_NOTHING      0       /* in the beginning, there was nothing... */
@@ -409,7 +409,7 @@ typedef struct kmx_conn
 
         mx_endpoint_addr_t  mxk_epa;            /* peer's endpoint address */
 
-        cfs_spinlock_t      mxk_lock;           /* lock */
+       spinlock_t          mxk_lock;           /* lock */
         unsigned long       mxk_timeout;        /* expiration of oldest pending tx/rx */
         unsigned long       mxk_last_tx;        /* when last tx completed with success */
         unsigned long       mxk_last_rx;        /* when last rx completed */
@@ -530,19 +530,19 @@ do {                                                            \
 } while (0)
 
 
-#define mxlnd_conn_decref(conn)                                       \
-do {                                                                  \
-        LASSERT(conn != NULL);                                        \
-        LASSERT(cfs_atomic_read(&(conn)->mxk_refcount) > 0);          \
-        if (cfs_atomic_dec_and_test(&(conn)->mxk_refcount)) {         \
-                cfs_spin_lock(&kmxlnd_data.kmx_conn_lock);            \
-                LASSERT((conn)->mxk_status == MXLND_CONN_DISCONNECT); \
-                CDEBUG(D_NET, "adding conn %p to zombies\n", (conn)); \
-                cfs_list_add_tail(&(conn)->mxk_zombie,                \
-                                  &kmxlnd_data.kmx_conn_zombies);     \
-                cfs_spin_unlock(&kmxlnd_data.kmx_conn_lock);          \
-                cfs_up(&kmxlnd_data.kmx_conn_sem);                    \
-        }                                                             \
+#define mxlnd_conn_decref(conn)                                                \
+do {                                                                   \
+       LASSERT(conn != NULL);                                          \
+       LASSERT(cfs_atomic_read(&(conn)->mxk_refcount) > 0);            \
+       if (cfs_atomic_dec_and_test(&(conn)->mxk_refcount)) {           \
+               spin_lock(&kmxlnd_data.kmx_conn_lock);                  \
+               LASSERT((conn)->mxk_status == MXLND_CONN_DISCONNECT);   \
+               CDEBUG(D_NET, "adding conn %p to zombies\n", (conn));   \
+               cfs_list_add_tail(&(conn)->mxk_zombie,                  \
+                                &kmxlnd_data.kmx_conn_zombies);        \
+               spin_unlock(&kmxlnd_data.kmx_conn_lock);                \
+               up(&kmxlnd_data.kmx_conn_sem);                          \
+       }                                                               \
 } while (0)
 
 #define mxlnd_valid_msg_type(type)                              \
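
The converted mxlnd_conn_decref() keeps the existing handoff: the last
reference moves the conn to a zombie list under a spinlock and calls
the plain kernel up() to wake the cleanup thread. A hedged sketch of
the consuming side, with hypothetical names (zombie_lock, zombie_list,
zombie_sem, struct zombie, reap()):

	#include <linux/semaphore.h>
	#include <linux/spinlock.h>
	#include <linux/list.h>

	struct zombie {
		struct list_head z_list;
	};

	static DEFINE_SPINLOCK(zombie_lock);
	static LIST_HEAD(zombie_list);
	static struct semaphore zombie_sem;	/* sema_init(&zombie_sem, 0) */

	static void reap(struct zombie *z);	/* hypothetical cleanup */

	static void reaper(void)
	{
		struct zombie *z;

		/* each producer-side up() pairs with one down() here */
		while (down_interruptible(&zombie_sem) == 0) {
			z = NULL;
			spin_lock(&zombie_lock);
			if (!list_empty(&zombie_list)) {
				z = list_first_entry(&zombie_list,
						     struct zombie, z_list);
				list_del_init(&z->z_list);
			}
			spin_unlock(&zombie_lock);
			if (z)
				reap(z);
		}
	}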
index f1c17a7..7d8ec3b 100644 (file)
@@ -171,16 +171,16 @@ mxlnd_get_idle_rx(kmx_conn_t *conn)
 
         rxs = &conn->mxk_rx_idle;
 
-        cfs_spin_lock(&conn->mxk_lock);
+       spin_lock(&conn->mxk_lock);
 
-        if (cfs_list_empty (rxs)) {
-                cfs_spin_unlock(&conn->mxk_lock);
-                return NULL;
-        }
+       if (cfs_list_empty(rxs)) {
+               spin_unlock(&conn->mxk_lock);
+               return NULL;
+       }
 
-        rx = cfs_list_entry (rxs->next, kmx_ctx_t, mxc_list);
-        cfs_list_del_init(&rx->mxc_list);
-        cfs_spin_unlock(&conn->mxk_lock);
+       rx = cfs_list_entry(rxs->next, kmx_ctx_t, mxc_list);
+       cfs_list_del_init(&rx->mxc_list);
+       spin_unlock(&conn->mxk_lock);
 
 #if MXLND_DEBUG
         if (rx->mxc_get != rx->mxc_put) {
@@ -220,25 +220,25 @@ mxlnd_put_idle_rx(kmx_ctx_t *rx)
         rx->mxc_put++;
         LASSERT(rx->mxc_get == rx->mxc_put);
 
-        cfs_spin_lock(&conn->mxk_lock);
-        cfs_list_add(&rx->mxc_list, rxs);
-        cfs_spin_unlock(&conn->mxk_lock);
-        return 0;
+       spin_lock(&conn->mxk_lock);
+       cfs_list_add(&rx->mxc_list, rxs);
+       spin_unlock(&conn->mxk_lock);
+       return 0;
 }
 
 kmx_ctx_t *
 mxlnd_get_idle_tx(void)
 {
-        cfs_list_t              *tmp    = &kmxlnd_data.kmx_tx_idle;
-        kmx_ctx_t               *tx     = NULL;
+       cfs_list_t              *tmp    = &kmxlnd_data.kmx_tx_idle;
+       kmx_ctx_t               *tx     = NULL;
 
-        cfs_spin_lock(&kmxlnd_data.kmx_tx_idle_lock);
+       spin_lock(&kmxlnd_data.kmx_tx_idle_lock);
 
-        if (cfs_list_empty (&kmxlnd_data.kmx_tx_idle)) {
-                CNETERR("%d txs in use\n", kmxlnd_data.kmx_tx_used);
-                cfs_spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
-                return NULL;
-        }
+       if (cfs_list_empty(&kmxlnd_data.kmx_tx_idle)) {
+               CNETERR("%d txs in use\n", kmxlnd_data.kmx_tx_used);
+               spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
+               return NULL;
+       }
 
         tmp = &kmxlnd_data.kmx_tx_idle;
         tx = cfs_list_entry (tmp->next, kmx_ctx_t, mxc_list);
@@ -252,7 +252,7 @@ mxlnd_get_idle_tx(void)
                 kmxlnd_data.kmx_tx_next_cookie = 1;
         }
         kmxlnd_data.kmx_tx_used++;
-        cfs_spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
+       spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
 
         LASSERT (tx->mxc_get == tx->mxc_put);
 
@@ -296,14 +296,16 @@ mxlnd_put_idle_tx(kmx_ctx_t *tx)
         tx->mxc_put++;
         LASSERT(tx->mxc_get == tx->mxc_put);
 
-        cfs_spin_lock(&kmxlnd_data.kmx_tx_idle_lock);
-        cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_idle);
-        kmxlnd_data.kmx_tx_used--;
-        cfs_spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
+       spin_lock(&kmxlnd_data.kmx_tx_idle_lock);
+       cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_idle);
+       kmxlnd_data.kmx_tx_used--;
+       spin_unlock(&kmxlnd_data.kmx_tx_idle_lock);
 
-        if (lntmsg[0] != NULL) lnet_finalize(kmxlnd_data.kmx_ni, lntmsg[0], result);
-        if (lntmsg[1] != NULL) lnet_finalize(kmxlnd_data.kmx_ni, lntmsg[1], result);
-        return 0;
+       if (lntmsg[0] != NULL)
+               lnet_finalize(kmxlnd_data.kmx_ni, lntmsg[0], result);
+       if (lntmsg[1] != NULL)
+               lnet_finalize(kmxlnd_data.kmx_ni, lntmsg[1], result);
+       return 0;
 }
 
 
@@ -419,7 +421,7 @@ mxlnd_conn_cancel_pending_rxs(kmx_conn_t *conn)
 
         do {
                 found = 0;
-                cfs_spin_lock(&conn->mxk_lock);
+               spin_lock(&conn->mxk_lock);
                 cfs_list_for_each_entry_safe(ctx, next, &conn->mxk_pending,
                                              mxc_list) {
                         cfs_list_del_init(&ctx->mxc_list);
@@ -434,33 +436,32 @@ mxlnd_conn_cancel_pending_rxs(kmx_conn_t *conn)
                                 if (result == 1) {
                                         ctx->mxc_errno = -ECONNABORTED;
                                         ctx->mxc_state = MXLND_CTX_CANCELED;
-                                        cfs_spin_unlock(&conn->mxk_lock);
-                                        cfs_spin_lock(&kmxlnd_data.kmx_conn_lock);
+                                       spin_unlock(&conn->mxk_lock);
+                                       spin_lock(&kmxlnd_data.kmx_conn_lock);
                                         /* we may be holding the global lock,
                                          * move to orphan list so that it can free it */
                                         cfs_list_add_tail(&ctx->mxc_list,
                                                           &kmxlnd_data.kmx_orphan_msgs);
                                         count++;
-                                        cfs_spin_unlock(&kmxlnd_data.kmx_conn_lock);
-                                        cfs_spin_lock(&conn->mxk_lock);
-                                }
-                                break;
-                        }
-                }
-                cfs_spin_unlock(&conn->mxk_lock);
-        }
-        while (found);
+                                       spin_unlock(&kmxlnd_data.kmx_conn_lock);
+                                       spin_lock(&conn->mxk_lock);
+                               }
+                               break;
+                       }
+               }
+               spin_unlock(&conn->mxk_lock);
+       } while (found);
 
-        return count;
+       return count;
 }
 
 int
 mxlnd_cancel_queued_txs(kmx_conn_t *conn)
 {
-        int                     count   = 0;
-        cfs_list_t             *tmp    = NULL;
+       int             count   = 0;
+       cfs_list_t      *tmp    = NULL;
 
-        cfs_spin_lock(&conn->mxk_lock);
+       spin_lock(&conn->mxk_lock);
         while (!cfs_list_empty(&conn->mxk_tx_free_queue) ||
                !cfs_list_empty(&conn->mxk_tx_credit_queue)) {
 
@@ -474,19 +475,19 @@ mxlnd_cancel_queued_txs(kmx_conn_t *conn)
 
                 tx = cfs_list_entry(tmp->next, kmx_ctx_t, mxc_list);
                 cfs_list_del_init(&tx->mxc_list);
-                cfs_spin_unlock(&conn->mxk_lock);
-                tx->mxc_errno = -ECONNABORTED;
-                tx->mxc_state = MXLND_CTX_CANCELED;
-                /* move to orphan list and then abort */
-                cfs_spin_lock(&kmxlnd_data.kmx_conn_lock);
-                cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_orphan_msgs);
-                cfs_spin_unlock(&kmxlnd_data.kmx_conn_lock);
-                count++;
-                cfs_spin_lock(&conn->mxk_lock);
-        }
-        cfs_spin_unlock(&conn->mxk_lock);
+               spin_unlock(&conn->mxk_lock);
+               tx->mxc_errno = -ECONNABORTED;
+               tx->mxc_state = MXLND_CTX_CANCELED;
+               /* move to orphan list and then abort */
+               spin_lock(&kmxlnd_data.kmx_conn_lock);
+               cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_orphan_msgs);
+               spin_unlock(&kmxlnd_data.kmx_conn_lock);
+               count++;
+               spin_lock(&conn->mxk_lock);
+       }
+       spin_unlock(&conn->mxk_lock);
 
-        return count;
+       return count;
 }
 
 void
@@ -513,24 +514,24 @@ mxlnd_send_message(mx_endpoint_addr_t epa, u8 msg_type, int error, u64 cookie)
 void
 mxlnd_conn_disconnect(kmx_conn_t *conn, int mx_dis, int send_bye)
 {
-        mx_endpoint_addr_t      epa     = conn->mxk_epa;
-        int                     valid   = !mxlnd_endpoint_addr_null(epa);
-        int                     count   = 0;
+       mx_endpoint_addr_t      epa     = conn->mxk_epa;
+       int                     valid   = !mxlnd_endpoint_addr_null(epa);
+       int                     count   = 0;
 
-        cfs_spin_lock(&conn->mxk_lock);
-        if (conn->mxk_status == MXLND_CONN_DISCONNECT) {
-                cfs_spin_unlock(&conn->mxk_lock);
-                return;
-        }
-        mxlnd_set_conn_status(conn, MXLND_CONN_DISCONNECT);
-        conn->mxk_timeout = 0;
-        cfs_spin_unlock(&conn->mxk_lock);
+       spin_lock(&conn->mxk_lock);
+       if (conn->mxk_status == MXLND_CONN_DISCONNECT) {
+               spin_unlock(&conn->mxk_lock);
+               return;
+       }
+       mxlnd_set_conn_status(conn, MXLND_CONN_DISCONNECT);
+       conn->mxk_timeout = 0;
+       spin_unlock(&conn->mxk_lock);
 
-        count = mxlnd_cancel_queued_txs(conn);
-        count += mxlnd_conn_cancel_pending_rxs(conn);
+       count = mxlnd_cancel_queued_txs(conn);
+       count += mxlnd_conn_cancel_pending_rxs(conn);
 
-        if (count)
-                cfs_up(&kmxlnd_data.kmx_conn_sem); /* let connd call kmxlnd_abort_msgs() */
+       if (count) /* let connd call kmxlnd_abort_msgs() */
+               up(&kmxlnd_data.kmx_conn_sem);
 
         if (send_bye && valid &&
             conn->mxk_peer->mxp_nid != kmxlnd_data.kmx_ni->ni_nid) {
@@ -636,7 +637,7 @@ mxlnd_conn_alloc_locked(kmx_conn_t **connp, kmx_peer_t *peer)
                 mxlnd_set_conn_status(conn, MXLND_CONN_INIT);
                 /* mxk_epa - to be set after mx_iconnect() */
         }
-        cfs_spin_lock_init(&conn->mxk_lock);
+       spin_lock_init(&conn->mxk_lock);
         /* conn->mxk_timeout = 0 */
         /* conn->mxk_last_tx = 0 */
         /* conn->mxk_last_rx = 0 */
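
spin_lock_init() is the standard kernel initializer for a spinlock
embedded in a dynamically allocated object; a statically allocated
lock can use the compile-time initializer instead. A small sketch
(struct obj and obj_init() are hypothetical):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(static_lock);	/* initialized at compile time */

	struct obj {
		spinlock_t ob_lock;
	};

	static void obj_init(struct obj *o)
	{
		/* dynamic: must run before the first spin_lock() */
		spin_lock_init(&o->ob_lock);
	}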
@@ -698,23 +699,23 @@ int
 mxlnd_conn_alloc(kmx_conn_t **connp, kmx_peer_t *peer)
 {
         int             ret     = 0;
-        cfs_rwlock_t   *g_lock  = &kmxlnd_data.kmx_global_lock;
+       rwlock_t   *g_lock  = &kmxlnd_data.kmx_global_lock;
 
-        cfs_write_lock(g_lock);
+       write_lock(g_lock);
         ret = mxlnd_conn_alloc_locked(connp, peer);
-        cfs_write_unlock(g_lock);
+       write_unlock(g_lock);
         return ret;
 }
 
 int
 mxlnd_q_pending_ctx(kmx_ctx_t *ctx)
 {
-        int             ret     = 0;
-        kmx_conn_t      *conn   = ctx->mxc_conn;
+       int             ret     = 0;
+       kmx_conn_t      *conn   = ctx->mxc_conn;
 
-        ctx->mxc_state = MXLND_CTX_PENDING;
-        if (conn != NULL) {
-                cfs_spin_lock(&conn->mxk_lock);
+       ctx->mxc_state = MXLND_CTX_PENDING;
+       if (conn != NULL) {
+               spin_lock(&conn->mxk_lock);
                 if (conn->mxk_status >= MXLND_CONN_INIT) {
                         cfs_list_add_tail(&ctx->mxc_list, &conn->mxk_pending);
                         if (conn->mxk_timeout == 0 || ctx->mxc_deadline < conn->mxk_timeout) {
@@ -724,9 +725,9 @@ mxlnd_q_pending_ctx(kmx_ctx_t *ctx)
                         ctx->mxc_state = MXLND_CTX_COMPLETED;
                         ret = -1;
                 }
-                cfs_spin_unlock(&conn->mxk_lock);
-        }
-        return ret;
+               spin_unlock(&conn->mxk_lock);
+       }
+       return ret;
 }
 
 int
@@ -745,7 +746,7 @@ mxlnd_deq_pending_ctx(kmx_ctx_t *ctx)
                 kmx_ctx_t       *next = NULL;
 
                 LASSERT(conn != NULL);
-                cfs_spin_lock(&conn->mxk_lock);
+               spin_lock(&conn->mxk_lock);
                 cfs_list_del_init(&ctx->mxc_list);
                 conn->mxk_timeout = 0;
                 if (!cfs_list_empty(&conn->mxk_pending)) {
@@ -753,9 +754,9 @@ mxlnd_deq_pending_ctx(kmx_ctx_t *ctx)
                                               kmx_ctx_t, mxc_list);
                         conn->mxk_timeout = next->mxc_deadline;
                 }
-                cfs_spin_unlock(&conn->mxk_lock);
-        }
-        return 0;
+               spin_unlock(&conn->mxk_lock);
+       }
+       return 0;
 }
 
 /**
@@ -949,23 +950,23 @@ mxlnd_find_peer_by_nid(lnet_nid_t nid, int create)
         int             hash    = 0;
         kmx_peer_t      *peer   = NULL;
         kmx_peer_t      *old    = NULL;
-        cfs_rwlock_t    *g_lock = &kmxlnd_data.kmx_global_lock;
+       rwlock_t    *g_lock = &kmxlnd_data.kmx_global_lock;
 
-        cfs_read_lock(g_lock);
+       read_lock(g_lock);
         peer = mxlnd_find_peer_by_nid_locked(nid); /* adds peer ref */
 
         if ((peer && peer->mxp_conn) || /* found peer with conn or */
             (!peer && !create)) {       /* did not find peer and do not create one */
-                cfs_read_unlock(g_lock);
+               read_unlock(g_lock);
                 return peer;
         }
 
-        cfs_read_unlock(g_lock);
+       read_unlock(g_lock);
 
         /* if peer but _not_ conn */
         if (peer && !peer->mxp_conn) {
                 if (create) {
-                        cfs_write_lock(g_lock);
+                       write_lock(g_lock);
                         if (!peer->mxp_conn) { /* check again */
                                 /* create the conn */
                                 ret = mxlnd_conn_alloc_locked(&peer->mxp_conn, peer);
@@ -979,7 +980,7 @@ mxlnd_find_peer_by_nid(lnet_nid_t nid, int create)
                                         mxlnd_conn_decref(peer->mxp_conn);
                                 }
                         }
-                        cfs_write_unlock(g_lock);
+                       write_unlock(g_lock);
                 }
                 return peer;
         }
@@ -994,7 +995,7 @@ mxlnd_find_peer_by_nid(lnet_nid_t nid, int create)
         if (ret != 0) /* no memory, peer is NULL */
                 return NULL;
 
-        cfs_write_lock(g_lock);
+       write_lock(g_lock);
 
         /* look again */
         old = mxlnd_find_peer_by_nid_locked(nid);
@@ -1013,7 +1014,7 @@ mxlnd_find_peer_by_nid(lnet_nid_t nid, int create)
                 mxlnd_conn_decref(peer->mxp_conn); /* drop ref from peer_alloc */
         }
 
-        cfs_write_unlock(g_lock);
+       write_unlock(g_lock);
 
         return peer;
 }
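
mxlnd_find_peer_by_nid() above follows the read-mostly idiom for the
kernel rwlock this patch switches to: look up under read_lock(), and
when creation is needed, look up again under write_lock(), since
another thread may have inserted the peer between the two locks. A
condensed sketch with hypothetical names (table_lock, table_find(),
table_insert(), obj_alloc(), obj_free()):

	#include <linux/spinlock.h>

	struct obj { int ob_key; };

	static DEFINE_RWLOCK(table_lock);
	static struct obj *table_find(int key);		/* hypothetical */
	static void table_insert(struct obj *o);	/* hypothetical */
	static struct obj *obj_alloc(int key);		/* hypothetical */
	static void obj_free(struct obj *o);		/* hypothetical */

	static struct obj *lookup_or_create(int key)
	{
		struct obj *o, *old;

		read_lock(&table_lock);
		o = table_find(key);
		read_unlock(&table_lock);
		if (o)
			return o;

		o = obj_alloc(key);
		if (o == NULL)
			return NULL;

		write_lock(&table_lock);
		old = table_find(key);		/* did we race? */
		if (old == NULL)
			table_insert(o);
		write_unlock(&table_lock);

		if (old) {			/* lost the race */
			obj_free(o);
			o = old;
		}
		return o;
	}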
@@ -1335,13 +1336,13 @@ mxlnd_unexpected_recv(void *context, mx_endpoint_addr_t source,
 
         mx_decompose_endpoint_addr2(source, &nic_id, &ep_id, &sid);
         mxlnd_parse_match(match_value, &msg_type, &error, &cookie);
-        cfs_read_lock(&kmxlnd_data.kmx_global_lock);
+       read_lock(&kmxlnd_data.kmx_global_lock);
         mx_get_endpoint_addr_context(source, (void **) &conn);
         if (conn) {
                 mxlnd_conn_addref(conn); /* add ref for this function */
                 peer = conn->mxk_peer;
         }
-        cfs_read_unlock(&kmxlnd_data.kmx_global_lock);
+       read_unlock(&kmxlnd_data.kmx_global_lock);
 
         if (msg_type == MXLND_MSG_BYE) {
                 if (conn) {
@@ -1374,12 +1375,12 @@ mxlnd_unexpected_recv(void *context, mx_endpoint_addr_t source,
                         mxlnd_send_message(source, MXLND_MSG_CONN_ACK, ENOMEM, 0);
                         return MX_RECV_FINISHED;
                 }
-                cfs_spin_lock(&kmxlnd_data.kmx_conn_lock);
-                cfs_list_add_tail(&cp->mxr_list, &kmxlnd_data.kmx_conn_reqs);
-                cfs_spin_unlock(&kmxlnd_data.kmx_conn_lock);
-                cfs_up(&kmxlnd_data.kmx_conn_sem);
-                return MX_RECV_FINISHED;
-        }
+               spin_lock(&kmxlnd_data.kmx_conn_lock);
+               cfs_list_add_tail(&cp->mxr_list, &kmxlnd_data.kmx_conn_reqs);
+               spin_unlock(&kmxlnd_data.kmx_conn_lock);
+               up(&kmxlnd_data.kmx_conn_sem);
+               return MX_RECV_FINISHED;
+       }
         if (msg_type == MXLND_MSG_CONN_ACK) {
                 kmx_connparams_t  *cp           = NULL;
                 const int       expected        = offsetof(kmx_msg_t, mxm_u) +
@@ -1404,13 +1405,13 @@ mxlnd_unexpected_recv(void *context, mx_endpoint_addr_t source,
                                 CNETERR("unable to alloc kmx_connparams_t"
                                                " from %llx:%d\n", nic_id, ep_id);
                                 mxlnd_conn_disconnect(conn, 1, 1);
-                        } else {
-                                cfs_spin_lock(&kmxlnd_data.kmx_conn_lock);
-                                cfs_list_add_tail(&cp->mxr_list,
-                                                  &kmxlnd_data.kmx_conn_reqs);
-                                cfs_spin_unlock(&kmxlnd_data.kmx_conn_lock);
-                                cfs_up(&kmxlnd_data.kmx_conn_sem);
-                        }
+                       } else {
+                               spin_lock(&kmxlnd_data.kmx_conn_lock);
+                               cfs_list_add_tail(&cp->mxr_list,
+                                                 &kmxlnd_data.kmx_conn_reqs);
+                               spin_unlock(&kmxlnd_data.kmx_conn_lock);
+                               up(&kmxlnd_data.kmx_conn_sem);
+                       }
                 }
                 mxlnd_conn_decref(conn); /* drop ref taken above */
 
@@ -1474,7 +1475,7 @@ mxlnd_get_peer_info(int index, lnet_nid_t *nidp, int *count)
         int              ret    = -ENOENT;
         kmx_peer_t      *peer   = NULL;
 
-        cfs_read_lock(&kmxlnd_data.kmx_global_lock);
+       read_lock(&kmxlnd_data.kmx_global_lock);
         for (i = 0; i < MXLND_HASH_SIZE; i++) {
                 cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i],
                                         mxp_list) {
@@ -1486,7 +1487,7 @@ mxlnd_get_peer_info(int index, lnet_nid_t *nidp, int *count)
                         }
                 }
         }
-        cfs_read_unlock(&kmxlnd_data.kmx_global_lock);
+       read_unlock(&kmxlnd_data.kmx_global_lock);
 
         return ret;
 }
@@ -1514,7 +1515,7 @@ mxlnd_del_peer(lnet_nid_t nid)
         if (nid != LNET_NID_ANY) {
                 peer = mxlnd_find_peer_by_nid(nid, 0); /* adds peer ref */
         }
-        cfs_write_lock(&kmxlnd_data.kmx_global_lock);
+       write_lock(&kmxlnd_data.kmx_global_lock);
         if (nid != LNET_NID_ANY) {
                 if (peer == NULL) {
                         ret = -ENOENT;
@@ -1531,7 +1532,7 @@ mxlnd_del_peer(lnet_nid_t nid)
                         }
                 }
         }
-        cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
+       write_unlock(&kmxlnd_data.kmx_global_lock);
 
         return ret;
 }
@@ -1543,7 +1544,7 @@ mxlnd_get_conn_by_idx(int index)
         kmx_peer_t      *peer   = NULL;
         kmx_conn_t      *conn   = NULL;
 
-        cfs_read_lock(&kmxlnd_data.kmx_global_lock);
+       read_lock(&kmxlnd_data.kmx_global_lock);
         for (i = 0; i < MXLND_HASH_SIZE; i++) {
                 cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i],
                                         mxp_list) {
@@ -1554,12 +1555,12 @@ mxlnd_get_conn_by_idx(int index)
                                 }
 
                                 mxlnd_conn_addref(conn); /* add ref here, dec in ctl() */
-                                cfs_read_unlock(&kmxlnd_data.kmx_global_lock);
+                               read_unlock(&kmxlnd_data.kmx_global_lock);
                                 return conn;
                         }
                 }
         }
-        cfs_read_unlock(&kmxlnd_data.kmx_global_lock);
+       read_unlock(&kmxlnd_data.kmx_global_lock);
 
         return NULL;
 }
@@ -1583,7 +1584,7 @@ mxlnd_close_matching_conns(lnet_nid_t nid)
         int             ret     = 0;
         kmx_peer_t      *peer   = NULL;
 
-        cfs_write_lock(&kmxlnd_data.kmx_global_lock);
+       write_lock(&kmxlnd_data.kmx_global_lock);
         if (nid != LNET_NID_ANY) {
                 peer = mxlnd_find_peer_by_nid_locked(nid); /* adds peer ref */
                 if (peer == NULL) {
@@ -1598,7 +1599,7 @@ mxlnd_close_matching_conns(lnet_nid_t nid)
                                 mxlnd_close_matching_conns_locked(peer);
                 }
         }
-        cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
+       write_unlock(&kmxlnd_data.kmx_global_lock);
 
         return ret;
 }
@@ -1710,13 +1711,13 @@ mxlnd_peer_queue_tx_locked(kmx_ctx_t *tx)
 static inline void
 mxlnd_peer_queue_tx(kmx_ctx_t *tx)
 {
-        LASSERT(tx->mxc_peer != NULL);
-        LASSERT(tx->mxc_conn != NULL);
-        cfs_spin_lock(&tx->mxc_conn->mxk_lock);
-        mxlnd_peer_queue_tx_locked(tx);
-        cfs_spin_unlock(&tx->mxc_conn->mxk_lock);
+       LASSERT(tx->mxc_peer != NULL);
+       LASSERT(tx->mxc_conn != NULL);
+       spin_lock(&tx->mxc_conn->mxk_lock);
+       mxlnd_peer_queue_tx_locked(tx);
+       spin_unlock(&tx->mxc_conn->mxk_lock);
 
-        return;
+       return;
 }
 
 /**
@@ -1757,13 +1758,13 @@ mxlnd_queue_tx(kmx_ctx_t *tx)
                 mxlnd_peer_queue_tx(tx);
                 mxlnd_check_sends(peer);
         } else {
-                cfs_spin_lock(&kmxlnd_data.kmx_tx_queue_lock);
-                cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_queue);
-                cfs_spin_unlock(&kmxlnd_data.kmx_tx_queue_lock);
-                cfs_up(&kmxlnd_data.kmx_tx_queue_sem);
-        }
+               spin_lock(&kmxlnd_data.kmx_tx_queue_lock);
+               cfs_list_add_tail(&tx->mxc_list, &kmxlnd_data.kmx_tx_queue);
+               spin_unlock(&kmxlnd_data.kmx_tx_queue_lock);
+               up(&kmxlnd_data.kmx_tx_queue_sem);
+       }
 done:
-        return;
+       return;
 }
 
 int
@@ -2119,7 +2120,7 @@ mxlnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
         int                     nob             = 0;
         uint32_t                length          = 0;
         kmx_peer_t             *peer            = NULL;
-        cfs_rwlock_t           *g_lock          = &kmxlnd_data.kmx_global_lock;
+       rwlock_t               *g_lock          = &kmxlnd_data.kmx_global_lock;
 
         CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
                        payload_nob, payload_niov, libcfs_id2str(target));
@@ -2151,14 +2152,13 @@ mxlnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
         if (unlikely(peer->mxp_incompatible)) {
                 mxlnd_peer_decref(peer); /* drop ref taken above */
         } else {
-                cfs_read_lock(g_lock);
-                conn = peer->mxp_conn;
-                if (conn && conn->mxk_status != MXLND_CONN_DISCONNECT) {
-                        mxlnd_conn_addref(conn);
-                } else {
-                        conn = NULL;
-                }
-                cfs_read_unlock(g_lock);
+               read_lock(g_lock);
+               conn = peer->mxp_conn;
+               if (conn && conn->mxk_status != MXLND_CONN_DISCONNECT)
+                       mxlnd_conn_addref(conn);
+               else
+                       conn = NULL;
+               read_unlock(g_lock);
                 mxlnd_peer_decref(peer); /* drop peer ref taken above */
                 if (!conn)
                         return -ENOTCONN;
@@ -2506,11 +2506,11 @@ mxlnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
 
         if (repost) {
                 /* we received a message, increment peer's outstanding credits */
-                if (credit == 1) {
-                        cfs_spin_lock(&conn->mxk_lock);
-                        conn->mxk_outstanding++;
-                        cfs_spin_unlock(&conn->mxk_lock);
-                }
+               if (credit == 1) {
+                       spin_lock(&conn->mxk_lock);
+                       conn->mxk_outstanding++;
+                       spin_unlock(&conn->mxk_lock);
+               }
                 /* we are done with the rx */
                 mxlnd_put_idle_rx(rx);
                 mxlnd_conn_decref(conn);
@@ -2549,46 +2549,47 @@ mxlnd_tx_queued(void *arg)
         kmx_ctx_t              *tx      = NULL;
         kmx_peer_t             *peer    = NULL;
         cfs_list_t             *queue   = &kmxlnd_data.kmx_tx_queue;
-        cfs_spinlock_t         *tx_q_lock = &kmxlnd_data.kmx_tx_queue_lock;
-        cfs_rwlock_t           *g_lock  = &kmxlnd_data.kmx_global_lock;
-
-        cfs_daemonize("mxlnd_tx_queued");
-
-        while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
-                ret = cfs_down_interruptible(&kmxlnd_data.kmx_tx_queue_sem);
-                if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown))
-                        break;
-                if (ret != 0) // Should we check for -EINTR?
-                        continue;
-                cfs_spin_lock(tx_q_lock);
-                if (cfs_list_empty (&kmxlnd_data.kmx_tx_queue)) {
-                        cfs_spin_unlock(tx_q_lock);
-                        continue;
-                }
-                tx = cfs_list_entry (queue->next, kmx_ctx_t, mxc_list);
-                cfs_list_del_init(&tx->mxc_list);
-                cfs_spin_unlock(tx_q_lock);
-
-                found = 0;
-                peer = mxlnd_find_peer_by_nid(tx->mxc_nid, 0); /* adds peer ref */
-                if (peer != NULL) {
-                        tx->mxc_peer = peer;
-                        cfs_write_lock(g_lock);
-                        if (peer->mxp_conn == NULL) {
-                                ret = mxlnd_conn_alloc_locked(&peer->mxp_conn, peer);
-                                if (ret != 0) {
-                                        /* out of memory, give up and fail tx */
-                                        tx->mxc_errno = -ENOMEM;
-                                        mxlnd_peer_decref(peer);
-                                        cfs_write_unlock(g_lock);
-                                        mxlnd_put_idle_tx(tx);
-                                        continue;
-                                }
-                        }
-                        tx->mxc_conn = peer->mxp_conn;
-                        mxlnd_conn_addref(tx->mxc_conn); /* for this tx */
-                        mxlnd_peer_decref(peer); /* drop peer ref taken above */
-                        cfs_write_unlock(g_lock);
+       spinlock_t              *tx_q_lock = &kmxlnd_data.kmx_tx_queue_lock;
+       rwlock_t                *g_lock  = &kmxlnd_data.kmx_global_lock;
+
+       cfs_daemonize("mxlnd_tx_queued");
+
+       while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
+               ret = down_interruptible(&kmxlnd_data.kmx_tx_queue_sem);
+               if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown))
+                       break;
+               if (ret != 0) /* Should we check for -EINTR? */
+                       continue;
+               spin_lock(tx_q_lock);
+               if (cfs_list_empty(&kmxlnd_data.kmx_tx_queue)) {
+                       spin_unlock(tx_q_lock);
+                       continue;
+               }
+               tx = cfs_list_entry(queue->next, kmx_ctx_t, mxc_list);
+               cfs_list_del_init(&tx->mxc_list);
+               spin_unlock(tx_q_lock);
+
+               found = 0;
+               peer = mxlnd_find_peer_by_nid(tx->mxc_nid, 0); /* adds ref */
+               if (peer != NULL) {
+                       tx->mxc_peer = peer;
+                       write_lock(g_lock);
+                       if (peer->mxp_conn == NULL) {
+                               ret = mxlnd_conn_alloc_locked(&peer->mxp_conn,
+                                                             peer);
+                               if (ret != 0) {
+                                       /* out of memory: give up, fail tx */
+                                       tx->mxc_errno = -ENOMEM;
+                                       mxlnd_peer_decref(peer);
+                                       write_unlock(g_lock);
+                                       mxlnd_put_idle_tx(tx);
+                                       continue;
+                               }
+                       }
+                       tx->mxc_conn = peer->mxp_conn;
+                       mxlnd_conn_addref(tx->mxc_conn); /* for this tx */
+                       mxlnd_peer_decref(peer); /* drop peer ref taken above */
+                       write_unlock(g_lock);
                         mxlnd_queue_tx(tx);
                         found = 1;
                 }
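
The "Should we check for -EINTR?" comment above has a definite answer
in the plain kernel API: down_interruptible() returns 0 once the
semaphore is acquired and -EINTR if the sleep was interrupted by a
signal, so treating any nonzero return as "loop and retry" is safe. A
minimal sketch (work_sem and wait_for_work() are hypothetical):

	#include <linux/semaphore.h>

	static struct semaphore work_sem;

	static void work_init(void)
	{
		sema_init(&work_sem, 0);  /* start empty; up() queues work */
	}

	static int wait_for_work(void)
	{
		/* 0: acquired a work token; -EINTR: hit by a signal */
		return down_interruptible(&work_sem);
	}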
@@ -2619,7 +2620,7 @@ mxlnd_tx_queued(void *arg)
                         /* add peer to global peer list, but look to see
                          * if someone already created it after we released
                          * the read lock */
-                        cfs_write_lock(g_lock);
+                       write_lock(g_lock);
                         old = mxlnd_find_peer_by_nid_locked(peer->mxp_nid);
                         if (old) {
                                 /* we have a peer ref on old */
@@ -2647,7 +2648,7 @@ mxlnd_tx_queued(void *arg)
                                 mxlnd_conn_decref(peer->mxp_conn); /* drop peer's ref */
                                 mxlnd_peer_decref(peer);
                         }
-                        cfs_write_unlock(g_lock);
+                       write_unlock(g_lock);
 
                         mxlnd_queue_tx(tx);
                 }
@@ -2684,10 +2685,10 @@ mxlnd_iconnect(kmx_peer_t *peer, u8 msg_type)
                         mx_nic_id_to_board_number(peer->mxp_nic_id, &peer->mxp_board);
                 }
                 if (peer->mxp_nic_id == 0ULL && conn->mxk_status == MXLND_CONN_WAIT) {
-                        /* not mapped yet, return */
-                        cfs_spin_lock(&conn->mxk_lock);
-                        mxlnd_set_conn_status(conn, MXLND_CONN_INIT);
-                        cfs_spin_unlock(&conn->mxk_lock);
+                       /* not mapped yet, return */
+                       spin_lock(&conn->mxk_lock);
+                       mxlnd_set_conn_status(conn, MXLND_CONN_INIT);
+                       spin_unlock(&conn->mxk_lock);
                 }
         }
 
@@ -2706,9 +2707,9 @@ mxlnd_iconnect(kmx_peer_t *peer, u8 msg_type)
                             peer->mxp_ep_id, MXLND_MSG_MAGIC, match,
                             (void *) peer, &request);
         if (unlikely(mxret != MX_SUCCESS)) {
-                cfs_spin_lock(&conn->mxk_lock);
-                mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
-                cfs_spin_unlock(&conn->mxk_lock);
+               spin_lock(&conn->mxk_lock);
+               mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
+               spin_unlock(&conn->mxk_lock);
                 CNETERR("mx_iconnect() failed with %s (%d) to %s\n",
                        mx_strerror(mxret), mxret, libcfs_nid2str(peer->mxp_nid));
                 mxlnd_conn_decref(conn);
@@ -2741,18 +2742,18 @@ mxlnd_check_sends(kmx_peer_t *peer)
                 LASSERT(peer != NULL);
                 return -1;
         }
-        cfs_write_lock(&kmxlnd_data.kmx_global_lock);
-        conn = peer->mxp_conn;
-        /* NOTE take a ref for the duration of this function since it is called
-         * when there might not be any queued txs for this peer */
-        if (conn) {
-                if (conn->mxk_status == MXLND_CONN_DISCONNECT) {
-                        cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
-                        return -1;
-                }
-                mxlnd_conn_addref(conn); /* for duration of this function */
-        }
-        cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
+       write_lock(&kmxlnd_data.kmx_global_lock);
+       conn = peer->mxp_conn;
+       /* NOTE take a ref for the duration of this function since it is
+        * called when there might not be any queued txs for this peer */
+       if (conn) {
+               if (conn->mxk_status == MXLND_CONN_DISCONNECT) {
+                       write_unlock(&kmxlnd_data.kmx_global_lock);
+                       return -1;
+               }
+               mxlnd_conn_addref(conn); /* for duration of this function */
+       }
+       write_unlock(&kmxlnd_data.kmx_global_lock);
 
         /* do not add another ref for this tx */
 
@@ -2773,7 +2774,7 @@ mxlnd_check_sends(kmx_peer_t *peer)
         }
 #endif
 
-        cfs_spin_lock(&conn->mxk_lock);
+       spin_lock(&conn->mxk_lock);
         ntx_posted = conn->mxk_ntx_posted;
         credits = conn->mxk_credits;
 
@@ -2808,7 +2809,7 @@ mxlnd_check_sends(kmx_peer_t *peer)
             conn->mxk_status == MXLND_CONN_FAIL)) {
                 CDEBUG(D_NET, "status=%s\n", mxlnd_connstatus_to_str(conn->mxk_status));
                 mxlnd_set_conn_status(conn, MXLND_CONN_WAIT);
-                cfs_spin_unlock(&conn->mxk_lock);
+               spin_unlock(&conn->mxk_lock);
                 mxlnd_iconnect(peer, (u8) MXLND_MSG_ICON_REQ);
                 goto done;
         }
@@ -2879,7 +2880,7 @@ mxlnd_check_sends(kmx_peer_t *peer)
                                     cfs_time_aftereq(jiffies, tx->mxc_deadline)) {
                                         cfs_list_del_init(&tx->mxc_list);
                                         tx->mxc_errno = -ECONNABORTED;
-                                        cfs_spin_unlock(&conn->mxk_lock);
+                                       spin_unlock(&conn->mxk_lock);
                                         mxlnd_put_idle_tx(tx);
                                         mxlnd_conn_decref(conn);
                                         goto done;
@@ -2915,7 +2916,7 @@ mxlnd_check_sends(kmx_peer_t *peer)
                             (conn->mxk_ntx_msgs >= 1)) {
                                 conn->mxk_credits++;
                                 conn->mxk_ntx_posted--;
-                                cfs_spin_unlock(&conn->mxk_lock);
+                               spin_unlock(&conn->mxk_lock);
                                 /* redundant NOOP */
                                 mxlnd_put_idle_tx(tx);
                                 mxlnd_conn_decref(conn);
@@ -2935,7 +2936,7 @@ mxlnd_check_sends(kmx_peer_t *peer)
                 mxret = MX_SUCCESS;
 
                 status = conn->mxk_status;
-                cfs_spin_unlock(&conn->mxk_lock);
+               spin_unlock(&conn->mxk_lock);
 
                 if (likely((status == MXLND_CONN_READY) ||
                     (msg_type == MXLND_MSG_CONN_REQ) ||
@@ -2969,10 +2970,10 @@ mxlnd_check_sends(kmx_peer_t *peer)
                                                           &tx->mxc_mxreq);
                                 } else {
                                         /* send a DATA tx */
-                                        cfs_spin_lock(&conn->mxk_lock);
-                                        conn->mxk_ntx_data--;
-                                        conn->mxk_data_posted++;
-                                        cfs_spin_unlock(&conn->mxk_lock);
+                                       spin_lock(&conn->mxk_lock);
+                                       conn->mxk_ntx_data--;
+                                       conn->mxk_data_posted++;
+                                       spin_unlock(&conn->mxk_lock);
                                         CDEBUG(D_NET, "sending %s 0x%llx\n",
                                                mxlnd_msgtype_to_str(msg_type),
                                                tx->mxc_cookie);
@@ -3003,23 +3004,24 @@ mxlnd_check_sends(kmx_peer_t *peer)
                                         tx->mxc_errno = -ECONNABORTED;
                                 }
                                 if (credit) {
-                                        cfs_spin_lock(&conn->mxk_lock);
-                                        conn->mxk_ntx_posted--;
-                                        conn->mxk_credits++;
-                                        cfs_spin_unlock(&conn->mxk_lock);
-                                } else if (msg_type == MXLND_MSG_PUT_DATA ||
-                                        msg_type == MXLND_MSG_GET_DATA) {
-                                        cfs_spin_lock(&conn->mxk_lock);
-                                        conn->mxk_data_posted--;
-                                        cfs_spin_unlock(&conn->mxk_lock);
-                                }
-                                if (msg_type != MXLND_MSG_PUT_DATA &&
-                                    msg_type != MXLND_MSG_GET_DATA &&
-                                    msg_type != MXLND_MSG_CONN_REQ &&
-                                    msg_type != MXLND_MSG_CONN_ACK) {
-                                        cfs_spin_lock(&conn->mxk_lock);
-                                        conn->mxk_outstanding += tx->mxc_msg->mxm_credits;
-                                        cfs_spin_unlock(&conn->mxk_lock);
+                                       spin_lock(&conn->mxk_lock);
+                                       conn->mxk_ntx_posted--;
+                                       conn->mxk_credits++;
+                                       spin_unlock(&conn->mxk_lock);
+                               } else if (msg_type == MXLND_MSG_PUT_DATA ||
+                                          msg_type == MXLND_MSG_GET_DATA) {
+                                       spin_lock(&conn->mxk_lock);
+                                       conn->mxk_data_posted--;
+                                       spin_unlock(&conn->mxk_lock);
+                               }
+                               if (msg_type != MXLND_MSG_PUT_DATA &&
+                                   msg_type != MXLND_MSG_GET_DATA &&
+                                   msg_type != MXLND_MSG_CONN_REQ &&
+                                   msg_type != MXLND_MSG_CONN_ACK) {
+                                       spin_lock(&conn->mxk_lock);
+                                       conn->mxk_outstanding +=
+                                               tx->mxc_msg->mxm_credits;
+                                       spin_unlock(&conn->mxk_lock);
                                 }
                                 if (msg_type != MXLND_MSG_CONN_REQ &&
                                     msg_type != MXLND_MSG_CONN_ACK) {
@@ -3030,13 +3032,13 @@ mxlnd_check_sends(kmx_peer_t *peer)
                                 mxlnd_conn_decref(conn);
                         }
                 }
-                cfs_spin_lock(&conn->mxk_lock);
-        }
+               spin_lock(&conn->mxk_lock);
+       }
 done_locked:
-        cfs_spin_unlock(&conn->mxk_lock);
+       spin_unlock(&conn->mxk_lock);
 done:
-        mxlnd_conn_decref(conn); /* drop ref taken at start of function */
-        return found;
+       mxlnd_conn_decref(conn); /* drop ref taken at start of function */
+       return found;
 }
 
 
@@ -3073,29 +3075,29 @@ mxlnd_handle_tx_completion(kmx_ctx_t *tx)
         if (failed) {
                 if (tx->mxc_errno == 0) tx->mxc_errno = -EIO;
         } else {
-                cfs_spin_lock(&conn->mxk_lock);
-                conn->mxk_last_tx = cfs_time_current(); /* jiffies */
-                cfs_spin_unlock(&conn->mxk_lock);
-        }
-
-        switch (type) {
-
-        case MXLND_MSG_GET_DATA:
-                cfs_spin_lock(&conn->mxk_lock);
-                if (conn->mxk_incarnation == tx->mxc_incarnation) {
-                        conn->mxk_outstanding++;
-                        conn->mxk_data_posted--;
-                }
-                cfs_spin_unlock(&conn->mxk_lock);
-                break;
-
-        case MXLND_MSG_PUT_DATA:
-                cfs_spin_lock(&conn->mxk_lock);
-                if (conn->mxk_incarnation == tx->mxc_incarnation) {
-                        conn->mxk_data_posted--;
-                }
-                cfs_spin_unlock(&conn->mxk_lock);
-                break;
+               spin_lock(&conn->mxk_lock);
+               conn->mxk_last_tx = cfs_time_current(); /* jiffies */
+               spin_unlock(&conn->mxk_lock);
+       }
+
+       switch (type) {
+
+       case MXLND_MSG_GET_DATA:
+               spin_lock(&conn->mxk_lock);
+               if (conn->mxk_incarnation == tx->mxc_incarnation) {
+                       conn->mxk_outstanding++;
+                       conn->mxk_data_posted--;
+               }
+               spin_unlock(&conn->mxk_lock);
+               break;
+
+       case MXLND_MSG_PUT_DATA:
+               spin_lock(&conn->mxk_lock);
+               if (conn->mxk_incarnation == tx->mxc_incarnation) {
+                       conn->mxk_data_posted--;
+               }
+               spin_unlock(&conn->mxk_lock);
+               break;
 
         case MXLND_MSG_NOOP:
         case MXLND_MSG_PUT_REQ:
@@ -3116,12 +3118,14 @@ mxlnd_handle_tx_completion(kmx_ctx_t *tx)
                                mx_strstatus(code), code, tx->mxc_errno,
                                libcfs_nid2str(tx->mxc_nid));
                         if (!peer->mxp_incompatible) {
-                                cfs_spin_lock(&conn->mxk_lock);
-                                if (code == MX_STATUS_BAD_SESSION)
-                                        mxlnd_set_conn_status(conn, MXLND_CONN_INIT);
-                                else
-                                        mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
-                                cfs_spin_unlock(&conn->mxk_lock);
+                               spin_lock(&conn->mxk_lock);
+                               if (code == MX_STATUS_BAD_SESSION)
+                                       mxlnd_set_conn_status(conn,
+                                                             MXLND_CONN_INIT);
+                               else
+                                       mxlnd_set_conn_status(conn,
+                                                             MXLND_CONN_FAIL);
+                               spin_unlock(&conn->mxk_lock);
                         }
                 }
                 break;
@@ -3132,11 +3136,11 @@ mxlnd_handle_tx_completion(kmx_ctx_t *tx)
         }
 
         if (credit) {
-                cfs_spin_lock(&conn->mxk_lock);
-                if (conn->mxk_incarnation == tx->mxc_incarnation) {
-                        conn->mxk_ntx_posted--;
-                }
-                cfs_spin_unlock(&conn->mxk_lock);
+               spin_lock(&conn->mxk_lock);
+               if (conn->mxk_incarnation == tx->mxc_incarnation) {
+                       conn->mxk_ntx_posted--;
+               }
+               spin_unlock(&conn->mxk_lock);
         }
 
         mxlnd_put_idle_tx(tx);
@@ -3187,7 +3191,7 @@ mxlnd_handle_rx_completion(kmx_ctx_t *rx)
         } /* else peer and conn == NULL */
 
         if (conn == NULL && peer != NULL) {
-                cfs_write_lock(&kmxlnd_data.kmx_global_lock);
+               write_lock(&kmxlnd_data.kmx_global_lock);
                 conn = peer->mxp_conn;
                 if (conn) {
                         mxlnd_conn_addref(conn); /* conn takes ref... */
@@ -3195,7 +3199,7 @@ mxlnd_handle_rx_completion(kmx_ctx_t *rx)
                         conn_ref = 1;
                         peer_ref = 0;
                 }
-                cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
+               write_unlock(&kmxlnd_data.kmx_global_lock);
                 rx->mxc_conn = conn;
         }
 
@@ -3279,7 +3283,7 @@ mxlnd_handle_rx_completion(kmx_ctx_t *rx)
 
         LASSERT(peer != NULL && conn != NULL);
         if (msg->mxm_credits != 0) {
-                cfs_spin_lock(&conn->mxk_lock);
+               spin_lock(&conn->mxk_lock);
                 if (msg->mxm_srcstamp == conn->mxk_incarnation) {
                         if ((conn->mxk_credits + msg->mxm_credits) >
                              *kmxlnd_tunables.kmx_peercredits) {
@@ -3290,7 +3294,7 @@ mxlnd_handle_rx_completion(kmx_ctx_t *rx)
                         LASSERT(conn->mxk_credits >= 0);
                         LASSERT(conn->mxk_credits <= *kmxlnd_tunables.kmx_peercredits);
                 }
-                cfs_spin_unlock(&conn->mxk_lock);
+               spin_unlock(&conn->mxk_lock);
         }
 
         CDEBUG(D_NET, "switch %s for rx (0x%llx)\n", mxlnd_msgtype_to_str(type), seq);
@@ -3340,16 +3344,16 @@ mxlnd_handle_rx_completion(kmx_ctx_t *rx)
 
         if (ret < 0) {
                 CDEBUG(D_NET, "setting PEER_CONN_FAILED\n");
-                cfs_spin_lock(&conn->mxk_lock);
-                mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
-                cfs_spin_unlock(&conn->mxk_lock);
-        }
+               spin_lock(&conn->mxk_lock);
+               mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
+               spin_unlock(&conn->mxk_lock);
+       }
 
 cleanup:
-        if (conn != NULL) {
-                cfs_spin_lock(&conn->mxk_lock);
-                conn->mxk_last_rx = cfs_time_current(); /* jiffies */
-                cfs_spin_unlock(&conn->mxk_lock);
+       if (conn != NULL) {
+               spin_lock(&conn->mxk_lock);
+               conn->mxk_last_rx = cfs_time_current(); /* jiffies */
+               spin_unlock(&conn->mxk_lock);
         }
 
         if (repost) {
@@ -3360,9 +3364,9 @@ cleanup:
                             type == MXLND_MSG_EAGER ||
                             type == MXLND_MSG_PUT_REQ ||
                             type == MXLND_MSG_NOOP) {
-                                cfs_spin_lock(&conn->mxk_lock);
-                                conn->mxk_outstanding++;
-                                cfs_spin_unlock(&conn->mxk_lock);
+                               spin_lock(&conn->mxk_lock);
+                               conn->mxk_outstanding++;
+                               spin_unlock(&conn->mxk_lock);
                         }
                 }
                 if (conn_ref) mxlnd_conn_decref(conn);
@@ -3410,9 +3414,9 @@ mxlnd_handle_connect_msg(kmx_peer_t *peer, u8 msg_type, mx_status_t status)
                         peer->mxp_nid,
                         peer->mxp_nic_id,
                         peer->mxp_ep_id);
-                cfs_spin_lock(&conn->mxk_lock);
-                mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
-                cfs_spin_unlock(&conn->mxk_lock);
+               spin_lock(&conn->mxk_lock);
+               mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
+               spin_unlock(&conn->mxk_lock);
 
                 if (cfs_time_after(jiffies, peer->mxp_reconnect_time +
                                    MXLND_CONNECT_TIMEOUT)) {
@@ -3424,21 +3428,21 @@ mxlnd_handle_connect_msg(kmx_peer_t *peer, u8 msg_type, mx_status_t status)
                 return;
         }
         mx_decompose_endpoint_addr2(status.source, &nic_id, &ep_id, &sid);
-        cfs_write_lock(&kmxlnd_data.kmx_global_lock);
-        cfs_spin_lock(&conn->mxk_lock);
-        conn->mxk_epa = status.source;
-        mx_set_endpoint_addr_context(conn->mxk_epa, (void *) conn);
-        if (msg_type == MXLND_MSG_ICON_ACK && likely(!peer->mxp_incompatible)) {
-                mxlnd_set_conn_status(conn, MXLND_CONN_READY);
-        }
-        cfs_spin_unlock(&conn->mxk_lock);
-        cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
-
-        /* mx_iconnect() succeeded, reset delay to 0 */
-        cfs_write_lock(&kmxlnd_data.kmx_global_lock);
-        peer->mxp_reconnect_time = 0;
-        peer->mxp_conn->mxk_sid = sid;
-        cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
+       write_lock(&kmxlnd_data.kmx_global_lock);
+       spin_lock(&conn->mxk_lock);
+       conn->mxk_epa = status.source;
+       mx_set_endpoint_addr_context(conn->mxk_epa, (void *) conn);
+       if (msg_type == MXLND_MSG_ICON_ACK && likely(!peer->mxp_incompatible)) {
+               mxlnd_set_conn_status(conn, MXLND_CONN_READY);
+       }
+       spin_unlock(&conn->mxk_lock);
+       write_unlock(&kmxlnd_data.kmx_global_lock);
+
+       /* mx_iconnect() succeeded, reset delay to 0 */
+       write_lock(&kmxlnd_data.kmx_global_lock);
+       peer->mxp_reconnect_time = 0;
+       peer->mxp_conn->mxk_sid = sid;
+       write_unlock(&kmxlnd_data.kmx_global_lock);
 
         /* marshal CONN_REQ or CONN_ACK msg */
         /* we are still using the conn ref from iconnect() - do not take another */
@@ -3447,9 +3451,9 @@ mxlnd_handle_connect_msg(kmx_peer_t *peer, u8 msg_type, mx_status_t status)
                 CNETERR("Can't obtain %s tx for %s\n",
                        mxlnd_msgtype_to_str(type),
                        libcfs_nid2str(peer->mxp_nid));
-                cfs_spin_lock(&conn->mxk_lock);
-                mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
-                cfs_spin_unlock(&conn->mxk_lock);
+               spin_lock(&conn->mxk_lock);
+               mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
+               spin_unlock(&conn->mxk_lock);
                 mxlnd_conn_decref(conn);
                 return;
         }
@@ -3598,15 +3602,15 @@ mxlnd_check_timeouts(unsigned long now)
         unsigned long   next            = 0; /* jiffies */
         kmx_peer_t      *peer           = NULL;
         kmx_conn_t      *conn           = NULL;
-        cfs_rwlock_t    *g_lock         = &kmxlnd_data.kmx_global_lock;
+       rwlock_t        *g_lock         = &kmxlnd_data.kmx_global_lock;
 
-        cfs_read_lock(g_lock);
-        for (i = 0; i < MXLND_HASH_SIZE; i++) {
-                cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i],
-                                        mxp_list) {
+       read_lock(g_lock);
+       for (i = 0; i < MXLND_HASH_SIZE; i++) {
+               cfs_list_for_each_entry(peer, &kmxlnd_data.kmx_peers[i],
+                                       mxp_list) {
 
-                        if (unlikely(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
-                                cfs_read_unlock(g_lock);
+                       if (unlikely(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
+                               read_unlock(g_lock);
                                 return next;
                         }
 
@@ -3617,14 +3621,14 @@ mxlnd_check_timeouts(unsigned long now)
                                 continue;
                         }
 
-                        cfs_spin_lock(&conn->mxk_lock);
+                       spin_lock(&conn->mxk_lock);
 
-                        /* if nothing pending (timeout == 0) or
-                         * if conn is already disconnected,
-                         * skip this conn */
-                        if (conn->mxk_timeout == 0 ||
-                            conn->mxk_status == MXLND_CONN_DISCONNECT) {
-                                cfs_spin_unlock(&conn->mxk_lock);
+                       /* if nothing pending (timeout == 0) or
+                        * if conn is already disconnected,
+                        * skip this conn */
+                       if (conn->mxk_timeout == 0 ||
+                           conn->mxk_status == MXLND_CONN_DISCONNECT) {
+                               spin_unlock(&conn->mxk_lock);
                                 mxlnd_conn_decref(conn);
                                 continue;
                         }
@@ -3640,21 +3644,20 @@ mxlnd_check_timeouts(unsigned long now)
 
                         disconnect = 0;
 
-                        if (cfs_time_aftereq(now, conn->mxk_timeout))  {
-                                disconnect = 1;
-                        }
-                        cfs_spin_unlock(&conn->mxk_lock);
+                       if (cfs_time_aftereq(now, conn->mxk_timeout))
+                               disconnect = 1;
+                       spin_unlock(&conn->mxk_lock);
 
-                        if (disconnect) {
-                                mxlnd_conn_disconnect(conn, 1, 1);
-                        }
-                        mxlnd_conn_decref(conn);
-                }
-        }
-        cfs_read_unlock(g_lock);
-        if (next == 0) next = now + MXLND_COMM_TIMEOUT;
+                       if (disconnect)
+                               mxlnd_conn_disconnect(conn, 1, 1);
+                       mxlnd_conn_decref(conn);
+               }
+       }
+       read_unlock(g_lock);
+       if (next == 0)
+               next = now + MXLND_COMM_TIMEOUT;
 
-        return next;
+       return next;
 }
 
 void
@@ -3669,7 +3672,7 @@ mxlnd_passive_connect(kmx_connparams_t *cp)
         kmx_msg_t       *msg            = &cp->mxr_msg;
         kmx_peer_t      *peer           = cp->mxr_peer;
         kmx_conn_t      *conn           = NULL;
-        cfs_rwlock_t    *g_lock         = &kmxlnd_data.kmx_global_lock;
+       rwlock_t        *g_lock         = &kmxlnd_data.kmx_global_lock;
 
         mx_decompose_endpoint_addr2(cp->mxr_epa, &nic_id, &ep_id, &sid);
 
@@ -3726,7 +3729,7 @@ mxlnd_passive_connect(kmx_connparams_t *cp)
                         }
                         peer->mxp_conn->mxk_sid = sid;
                         LASSERT(peer->mxp_ep_id == ep_id);
-                        cfs_write_lock(g_lock);
+                       write_lock(g_lock);
                         existing_peer = mxlnd_find_peer_by_nid_locked(msg->mxm_srcnid);
                         if (existing_peer) {
                                 mxlnd_conn_decref(peer->mxp_conn);
@@ -3739,12 +3742,12 @@ mxlnd_passive_connect(kmx_connparams_t *cp)
                                                   &kmxlnd_data.kmx_peers[hash]);
                                 cfs_atomic_inc(&kmxlnd_data.kmx_npeers);
                         }
-                        cfs_write_unlock(g_lock);
+                       write_unlock(g_lock);
                 } else {
                         ret = mxlnd_conn_alloc(&conn, peer); /* adds 2nd ref */
-                        cfs_write_lock(g_lock);
+                       write_lock(g_lock);
                         mxlnd_peer_decref(peer); /* drop ref taken above */
-                        cfs_write_unlock(g_lock);
+                       write_unlock(g_lock);
                         if (ret != 0) {
                                 CNETERR("Cannot allocate mxp_conn\n");
                                 goto cleanup;
@@ -3778,13 +3781,13 @@ mxlnd_passive_connect(kmx_connparams_t *cp)
                         conn = peer->mxp_conn;
                 }
         }
-        cfs_write_lock(g_lock);
-        peer->mxp_incompatible = incompatible;
-        cfs_write_unlock(g_lock);
-        cfs_spin_lock(&conn->mxk_lock);
-        conn->mxk_incarnation = msg->mxm_srcstamp;
-        mxlnd_set_conn_status(conn, MXLND_CONN_WAIT);
-        cfs_spin_unlock(&conn->mxk_lock);
+       write_lock(g_lock);
+       peer->mxp_incompatible = incompatible;
+       write_unlock(g_lock);
+       spin_lock(&conn->mxk_lock);
+       conn->mxk_incarnation = msg->mxm_srcstamp;
+       mxlnd_set_conn_status(conn, MXLND_CONN_WAIT);
+       spin_unlock(&conn->mxk_lock);
 
         /* handle_conn_ack() will create the CONN_ACK msg */
         mxlnd_iconnect(peer, (u8) MXLND_MSG_ICON_ACK);
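
mxlnd_passive_connect() keeps each critical section minimal: the global rwlock is held for writing only around the one-field peer update, and the connection status changes under the separate mxk_lock, taken after (never inside) the write lock. A sketch of that shape; every my_* name and MY_CONN_WAIT are invented here:

    #include <linux/spinlock.h>

    struct my_peer { int p_incompatible; };
    struct my_conn { spinlock_t c_lock; int c_status; };

    static DEFINE_RWLOCK(my_gbl_lock);      /* guards my_peer fields */

    #define MY_CONN_WAIT 2                  /* hypothetical state value */

    static void my_accept(struct my_peer *p, struct my_conn *c, int incompat)
    {
            write_lock(&my_gbl_lock);       /* excludes every reader... */
            p->p_incompatible = incompat;
            write_unlock(&my_gbl_lock);     /* ...so keep this short */

            spin_lock(&c->c_lock);          /* sequential, never nested
                                             * inside the write lock */
            c->c_status = MY_CONN_WAIT;
            spin_unlock(&c->c_lock);
    }
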
@@ -3850,10 +3853,10 @@ mxlnd_check_conn_ack(kmx_connparams_t *cp)
                 ret = -1;
                 goto failed;
         }
-        cfs_write_lock(&kmxlnd_data.kmx_global_lock);
-        peer->mxp_incompatible = incompatible;
-        cfs_write_unlock(&kmxlnd_data.kmx_global_lock);
-        cfs_spin_lock(&conn->mxk_lock);
+       write_lock(&kmxlnd_data.kmx_global_lock);
+       peer->mxp_incompatible = incompatible;
+       write_unlock(&kmxlnd_data.kmx_global_lock);
+       spin_lock(&conn->mxk_lock);
         conn->mxk_credits = *kmxlnd_tunables.kmx_peercredits;
         conn->mxk_outstanding = 0;
         conn->mxk_incarnation = msg->mxm_srcstamp;
@@ -3863,40 +3866,40 @@ mxlnd_check_conn_ack(kmx_connparams_t *cp)
                        libcfs_nid2str(msg->mxm_srcnid));
                 mxlnd_set_conn_status(conn, MXLND_CONN_READY);
         }
-        cfs_spin_unlock(&conn->mxk_lock);
+       spin_unlock(&conn->mxk_lock);
 
-        if (!incompatible)
-                mxlnd_check_sends(peer);
+       if (!incompatible)
+               mxlnd_check_sends(peer);
 
 failed:
-        if (ret < 0) {
-                cfs_spin_lock(&conn->mxk_lock);
-                mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
-                cfs_spin_unlock(&conn->mxk_lock);
-        }
+       if (ret < 0) {
+               spin_lock(&conn->mxk_lock);
+               mxlnd_set_conn_status(conn, MXLND_CONN_FAIL);
+               spin_unlock(&conn->mxk_lock);
+       }
 
-        if (incompatible) mxlnd_conn_disconnect(conn, 0, 0);
+       if (incompatible) mxlnd_conn_disconnect(conn, 0, 0);
 
-        mxlnd_connparams_free(cp);
-        return;
+       mxlnd_connparams_free(cp);
+       return;
 }
 
 int
 mxlnd_abort_msgs(void)
 {
-        int                     count           = 0;
-        cfs_list_t              *orphans        = &kmxlnd_data.kmx_orphan_msgs;
-        cfs_spinlock_t          *g_conn_lock    = &kmxlnd_data.kmx_conn_lock;
+       int                     count           = 0;
+       cfs_list_t              *orphans        = &kmxlnd_data.kmx_orphan_msgs;
+       spinlock_t              *g_conn_lock    = &kmxlnd_data.kmx_conn_lock;
 
-        /* abort orphans */
-        cfs_spin_lock(g_conn_lock);
-        while (!cfs_list_empty(orphans)) {
-                kmx_ctx_t       *ctx     = NULL;
-                kmx_conn_t      *conn   = NULL;
+       /* abort orphans */
+       spin_lock(g_conn_lock);
+       while (!cfs_list_empty(orphans)) {
+               kmx_ctx_t       *ctx     = NULL;
+               kmx_conn_t      *conn   = NULL;
 
-                ctx = cfs_list_entry(orphans->next, kmx_ctx_t, mxc_list);
-                cfs_list_del_init(&ctx->mxc_list);
-                cfs_spin_unlock(g_conn_lock);
+               ctx = cfs_list_entry(orphans->next, kmx_ctx_t, mxc_list);
+               cfs_list_del_init(&ctx->mxc_list);
+               spin_unlock(g_conn_lock);
 
                 ctx->mxc_errno = -ECONNABORTED;
                 conn = ctx->mxc_conn;
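
mxlnd_abort_msgs() drains the orphan list one entry at a time: unlink under the spinlock, process with the lock dropped, retake the lock before the next list_empty() test. The same loop against the plain kernel list and spinlock API (my_msg and the other my_* names are hypothetical):

    #include <linux/spinlock.h>
    #include <linux/list.h>

    struct my_msg { struct list_head m_list; };

    static LIST_HEAD(my_orphans);
    static DEFINE_SPINLOCK(my_orphan_lock);

    static int my_abort_all(void)
    {
            int count = 0;

            spin_lock(&my_orphan_lock);
            while (!list_empty(&my_orphans)) {
                    struct my_msg *m = list_entry(my_orphans.next,
                                                  struct my_msg, m_list);

                    list_del_init(&m->m_list);     /* detach while locked */
                    spin_unlock(&my_orphan_lock);  /* process unlocked... */

                    /* ... complete/free m here; it is unreachable now */
                    count++;

                    spin_lock(&my_orphan_lock);    /* ...relock for the test */
            }
            spin_unlock(&my_orphan_lock);
            return count;
    }

Because list_del_init() happens while the lock is held, the entry can no longer be reached through the list by the time the lock is released.
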
@@ -3913,40 +3916,40 @@ mxlnd_abort_msgs(void)
                 }
 
                 count++;
-                cfs_spin_lock(g_conn_lock);
-        }
-        cfs_spin_unlock(g_conn_lock);
+               spin_lock(g_conn_lock);
+       }
+       spin_unlock(g_conn_lock);
 
-        return count;
+       return count;
 }
 
 int
 mxlnd_free_conn_zombies(void)
 {
-        int                     count           = 0;
-        cfs_list_t             *zombies        = &kmxlnd_data.kmx_conn_zombies;
-        cfs_spinlock_t         *g_conn_lock    = &kmxlnd_data.kmx_conn_lock;
-        cfs_rwlock_t           *g_lock         = &kmxlnd_data.kmx_global_lock;
+       int             count           = 0;
+       cfs_list_t      *zombies        = &kmxlnd_data.kmx_conn_zombies;
+       spinlock_t      *g_conn_lock    = &kmxlnd_data.kmx_conn_lock;
+       rwlock_t        *g_lock         = &kmxlnd_data.kmx_global_lock;
 
-        /* cleanup any zombies */
-        cfs_spin_lock(g_conn_lock);
-        while (!cfs_list_empty(zombies)) {
-                kmx_conn_t      *conn   = NULL;
+       /* cleanup any zombies */
+       spin_lock(g_conn_lock);
+       while (!cfs_list_empty(zombies)) {
+               kmx_conn_t      *conn   = NULL;
 
-                conn = cfs_list_entry(zombies->next, kmx_conn_t, mxk_zombie);
-                cfs_list_del_init(&conn->mxk_zombie);
-                cfs_spin_unlock(g_conn_lock);
+               conn = cfs_list_entry(zombies->next, kmx_conn_t, mxk_zombie);
+               cfs_list_del_init(&conn->mxk_zombie);
+               spin_unlock(g_conn_lock);
 
-                cfs_write_lock(g_lock);
-                mxlnd_conn_free_locked(conn);
-                cfs_write_unlock(g_lock);
+               write_lock(g_lock);
+               mxlnd_conn_free_locked(conn);
+               write_unlock(g_lock);
 
-                count++;
-                cfs_spin_lock(g_conn_lock);
-        }
-        cfs_spin_unlock(g_conn_lock);
-        CDEBUG(D_NET, "%s: freed %d zombies\n", __func__, count);
-        return count;
+               count++;
+               spin_lock(g_conn_lock);
+       }
+       spin_unlock(g_conn_lock);
+       CDEBUG(D_NET, "%s: freed %d zombies\n", __func__, count);
+       return count;
 }
 
 /**
@@ -3967,10 +3970,10 @@ mxlnd_connd(void *arg)
         while (!(cfs_atomic_read(&kmxlnd_data.kmx_shutdown))) {
                 int                ret             = 0;
                 kmx_connparams_t  *cp              = NULL;
-                cfs_spinlock_t    *g_conn_lock     = &kmxlnd_data.kmx_conn_lock;
-                cfs_list_t        *conn_reqs       = &kmxlnd_data.kmx_conn_reqs;
+               spinlock_t        *g_conn_lock  = &kmxlnd_data.kmx_conn_lock;
+               cfs_list_t        *conn_reqs    = &kmxlnd_data.kmx_conn_reqs;
 
-                ret = cfs_down_interruptible(&kmxlnd_data.kmx_conn_sem);
+               ret = down_interruptible(&kmxlnd_data.kmx_conn_sem);
 
                 if (cfs_atomic_read(&kmxlnd_data.kmx_shutdown))
                         break;
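
mxlnd_connd() is a worker that sleeps on a counting semaphore, presumably posted once per queued request or zombie by the producer side, and rechecks the shutdown flag after every wakeup. A sketch of that consumer loop (all my_* names are hypothetical):

    #include <linux/semaphore.h>
    #include <linux/atomic.h>

    static struct semaphore my_conn_sem;
    static atomic_t my_shutdown = ATOMIC_INIT(0);

    static void my_init(void)
    {
            sema_init(&my_conn_sem, 0);     /* nothing queued yet */
    }

    static int my_connd(void *arg)
    {
            while (!atomic_read(&my_shutdown)) {
                    /* sleeps until a producer calls up(&my_conn_sem);
                     * returns -EINTR if a signal arrives first */
                    if (down_interruptible(&my_conn_sem) != 0)
                            continue;       /* interrupted: recheck flags */
                    if (atomic_read(&my_shutdown))
                            break;
                    /* ... dequeue and handle exactly one request ... */
            }
            return 0;
    }
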
@@ -3981,18 +3984,18 @@ mxlnd_connd(void *arg)
                 ret = mxlnd_abort_msgs();
                 ret += mxlnd_free_conn_zombies();
 
-                cfs_spin_lock(g_conn_lock);
-                if (cfs_list_empty(conn_reqs)) {
-                        if (ret == 0)
-                                CNETERR("connd woke up but did not "
-                                       "find a kmx_connparams_t or zombie conn\n");
-                        cfs_spin_unlock(g_conn_lock);
-                        continue;
-                }
-                cp = cfs_list_entry(conn_reqs->next, kmx_connparams_t,
-                                    mxr_list);
-                cfs_list_del_init(&cp->mxr_list);
-                cfs_spin_unlock(g_conn_lock);
+               spin_lock(g_conn_lock);
+               if (cfs_list_empty(conn_reqs)) {
+                       if (ret == 0)
+                               CNETERR("connd woke up but did not find a "
+                                       "kmx_connparams_t or zombie conn\n");
+                       spin_unlock(g_conn_lock);
+                       continue;
+               }
+               cp = cfs_list_entry(conn_reqs->next, kmx_connparams_t,
+                                   mxr_list);
+               cfs_list_del_init(&cp->mxr_list);
+               spin_unlock(g_conn_lock);
 
                 switch (MXLND_MSG_TYPE(cp->mxr_match)) {
                 case MXLND_MSG_CONN_REQ:
@@ -4033,7 +4036,7 @@ mxlnd_timeoutd(void *arg)
         kmx_peer_t     *peer    = NULL;
         kmx_peer_t     *temp    = NULL;
         kmx_conn_t     *conn    = NULL;
-        cfs_rwlock_t   *g_lock  = &kmxlnd_data.kmx_global_lock;
+       rwlock_t       *g_lock  = &kmxlnd_data.kmx_global_lock;
 
         cfs_daemonize("mxlnd_timeoutd");
 
@@ -4048,7 +4051,7 @@ mxlnd_timeoutd(void *arg)
                 }
 
                 /* try to progress peers' txs */
-               cfs_write_lock(g_lock);
+               write_lock(g_lock);
                 for (i = 0; i < MXLND_HASH_SIZE; i++) {
                         cfs_list_t *peers = &kmxlnd_data.kmx_peers[i];
 
@@ -4074,15 +4077,15 @@ mxlnd_timeoutd(void *arg)
                                     cfs_time_after(now,
                                                    conn->mxk_last_tx +
                                                    CFS_HZ)) {
-                                        cfs_write_unlock(g_lock);
-                                        mxlnd_check_sends(peer);
-                                        cfs_write_lock(g_lock);
-                                }
-                                mxlnd_conn_decref(conn); /* until here */
-                                mxlnd_peer_decref(peer); /* ...to here */
-                        }
-                }
-                cfs_write_unlock(g_lock);
+                                       write_unlock(g_lock);
+                                       mxlnd_check_sends(peer);
+                                       write_lock(g_lock);
+                               }
+                               mxlnd_conn_decref(conn); /* until here */
+                               mxlnd_peer_decref(peer); /* ...to here */
+                       }
+               }
+               write_unlock(g_lock);
 
                 mxlnd_sleep(delay);
         }
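
mxlnd_timeoutd() holds the global lock for writing while walking the peer table but sheds it around mxlnd_check_sends(), which takes other locks; the refcounts taken just before (the "until here" / "...to here" comments) keep the objects alive across the window. The same idea with a kref standing in for the LND refcount:

    #include <linux/spinlock.h>
    #include <linux/kref.h>

    struct my_peer {
            struct kref p_ref;
    };

    static DEFINE_RWLOCK(my_gbl_lock);

    static void my_peer_release(struct kref *k)
    {
            /* container_of(k, struct my_peer, p_ref) is freed here */
    }

    static void my_check_sends(struct my_peer *p)
    {
            /* takes per-connection locks; must not hold my_gbl_lock */
    }

    /* Called with my_gbl_lock held for write; returns the same way. */
    static void my_poke(struct my_peer *p)
    {
            kref_get(&p->p_ref);            /* pin p across the unlock */
            write_unlock(&my_gbl_lock);
            my_check_sends(p);
            write_lock(&my_gbl_lock);
            kref_put(&p->p_ref, my_peer_release);
    }

Anything read from the table before the unlock is stale afterwards and must be revalidated, since other writers can run in the gap.
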
index 9da9fa9..abcb56d 100644 (file)
@@ -352,7 +352,7 @@ kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
         CFS_INIT_LIST_HEAD(&peer->ibp_conns);
         CFS_INIT_LIST_HEAD(&peer->ibp_tx_queue);
 
-        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         /* always called with a ref on ni, which prevents ni being shutdown */
         LASSERT (net->ibn_shutdown == 0);
@@ -360,7 +360,7 @@ kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
         /* npeers only grows with the global lock held */
         cfs_atomic_inc(&net->ibn_npeers);
 
-        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         *peerp = peer;
         return 0;
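
The o2ib LND hunks that follow take the global lock with the _irqsave variants, the usual choice when a lock may also be taken from interrupt context or with interrupts already disabled. The minimal shape, with hypothetical names:

    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(my_gbl_lock);
    static int my_npeers;

    static void my_add_peer(void)
    {
            unsigned long flags;

            /* save the current IRQ state, disable interrupts, lock */
            write_lock_irqsave(&my_gbl_lock, flags);
            my_npeers++;
            /* unlock and restore the saved IRQ state; a plain
             * write_unlock_irq() would unconditionally re-enable IRQs */
            write_unlock_irqrestore(&my_gbl_lock, flags);
    }
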
@@ -437,7 +437,7 @@ kiblnd_get_peer_info (lnet_ni_t *ni, int index,
         int                    i;
         unsigned long          flags;
 
-        cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
 
@@ -457,13 +457,13 @@ kiblnd_get_peer_info (lnet_ni_t *ni, int index,
                         *nidp = peer->ibp_nid;
                         *count = cfs_atomic_read(&peer->ibp_refcount);
 
-                        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
-                                                   flags);
-                        return 0;
-                }
-        }
+                       read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+                                              flags);
+                       return 0;
+               }
+       }
 
-        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
         return -ENOENT;
 }
 
@@ -501,7 +501,7 @@ kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid)
         unsigned long          flags;
         int                    rc = -ENOENT;
 
-        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         if (nid != LNET_NID_ANY) {
                 lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
@@ -535,7 +535,7 @@ kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid)
                 }
         }
 
-        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         kiblnd_txlist_done(ni, &zombies, -EIO);
 
@@ -552,7 +552,7 @@ kiblnd_get_conn_by_idx (lnet_ni_t *ni, int index)
         int                    i;
         unsigned long          flags;
 
-        cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
                 cfs_list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {
@@ -572,14 +572,14 @@ kiblnd_get_conn_by_idx (lnet_ni_t *ni, int index)
                                 conn = cfs_list_entry(ctmp, kib_conn_t,
                                                       ibc_list);
                                 kiblnd_conn_addref(conn);
-                                cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
-                                                           flags);
-                                return conn;
-                        }
-                }
-        }
+                               read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+                                                      flags);
+                               return conn;
+                       }
+               }
+       }
 
-        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
         return NULL;
 }
 
@@ -606,10 +606,10 @@ kiblnd_debug_tx (kib_tx_t *tx)
 void
 kiblnd_debug_conn (kib_conn_t *conn)
 {
-        cfs_list_t           *tmp;
-        int                   i;
+       cfs_list_t      *tmp;
+       int             i;
 
-        cfs_spin_lock(&conn->ibc_lock);
+       spin_lock(&conn->ibc_lock);
 
         CDEBUG(D_CONSOLE, "conn[%d] %p [version %x] -> %s: \n",
                cfs_atomic_read(&conn->ibc_refcount), conn,
@@ -648,7 +648,7 @@ kiblnd_debug_conn (kib_conn_t *conn)
         for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++)
                 kiblnd_debug_rx(&conn->ibc_rxs[i]);
 
-        cfs_spin_unlock(&conn->ibc_lock);
+       spin_unlock(&conn->ibc_lock);
 }
 
 int
@@ -725,7 +725,7 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
          * she must dispose of 'cmid'.  (Actually I'd block forever if I tried
          * to destroy 'cmid' here since I'm called from the CM which still has
          * its ref on 'cmid'). */
-        cfs_rwlock_t           *glock = &kiblnd_data.kib_global_lock;
+       rwlock_t                *glock = &kiblnd_data.kib_global_lock;
         kib_net_t              *net = peer->ibp_ni->ni_data;
         kib_dev_t              *dev = net->ibn_dev;
         struct ib_qp_init_attr *init_qp_attr;
@@ -772,21 +772,21 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
         CFS_INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
         CFS_INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
         CFS_INIT_LIST_HEAD(&conn->ibc_active_txs);
-        cfs_spin_lock_init(&conn->ibc_lock);
+       spin_lock_init(&conn->ibc_lock);
 
        LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
                         sizeof(*conn->ibc_connvars));
-        if (conn->ibc_connvars == NULL) {
-                CERROR("Can't allocate in-progress connection state\n");
-                goto failed_2;
-        }
+       if (conn->ibc_connvars == NULL) {
+               CERROR("Can't allocate in-progress connection state\n");
+               goto failed_2;
+       }
 
-        cfs_write_lock_irqsave(glock, flags);
-        if (dev->ibd_failover) {
-                cfs_write_unlock_irqrestore(glock, flags);
-                CERROR("%s: failover in progress\n", dev->ibd_ifname);
-                goto failed_2;
-        }
+       write_lock_irqsave(glock, flags);
+       if (dev->ibd_failover) {
+               write_unlock_irqrestore(glock, flags);
+               CERROR("%s: failover in progress\n", dev->ibd_ifname);
+               goto failed_2;
+       }
 
         if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
                 /* wakeup failover thread and teardown connection */
@@ -796,7 +796,7 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
                         cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
                 }
 
-                cfs_write_unlock_irqrestore(glock, flags);
+               write_unlock_irqrestore(glock, flags);
                 CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
                        cmid->device->name, dev->ibd_ifname);
                 goto failed_2;
@@ -807,7 +807,7 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
 
         kiblnd_setup_mtu_locked(cmid);
 
-        cfs_write_unlock_irqrestore(glock, flags);
+       write_unlock_irqrestore(glock, flags);
 
        LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
                         IBLND_RX_MSGS(version) * sizeof(kib_rx_t));
@@ -818,10 +818,10 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
 
        rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
                                IBLND_RX_MSG_PAGES(version));
-        if (rc != 0)
-                goto failed_2;
+       if (rc != 0)
+               goto failed_2;
 
-        kiblnd_map_rx_descs(conn);
+       kiblnd_map_rx_descs(conn);
 
 #ifdef HAVE_OFED_IB_COMP_VECTOR
        cq = ib_create_cq(cmid->device,
@@ -886,9 +886,9 @@ kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
 
                         /* correct # of posted buffers
                          * NB locking needed now I'm racing with completion */
-                       cfs_spin_lock_irqsave(&sched->ibs_lock, flags);
+                       spin_lock_irqsave(&sched->ibs_lock, flags);
                        conn->ibc_nrx -= IBLND_RX_MSGS(version) - i;
-                       cfs_spin_unlock_irqrestore(&sched->ibs_lock, flags);
+                       spin_unlock_irqrestore(&sched->ibs_lock, flags);
 
                         /* cmid will be destroyed by CM(ofed) after cm_callback
                          * returned, so we can't refer it anymore
@@ -1053,7 +1053,7 @@ kiblnd_close_matching_conns (lnet_ni_t *ni, lnet_nid_t nid)
         unsigned long           flags;
         int                     count = 0;
 
-        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         if (nid != LNET_NID_ANY)
                 lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
@@ -1080,7 +1080,7 @@ kiblnd_close_matching_conns (lnet_ni_t *ni, lnet_nid_t nid)
                 }
         }
 
-        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         /* wildcards always succeed */
         if (nid == LNET_NID_ANY)
@@ -1146,13 +1146,13 @@ kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
 void
 kiblnd_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
 {
-        cfs_time_t         last_alive = 0;
-        cfs_time_t         now = cfs_time_current();
-        cfs_rwlock_t      *glock = &kiblnd_data.kib_global_lock;
-        kib_peer_t        *peer;
-        unsigned long      flags;
+       cfs_time_t      last_alive = 0;
+       cfs_time_t      now = cfs_time_current();
+       rwlock_t        *glock = &kiblnd_data.kib_global_lock;
+       kib_peer_t      *peer;
+       unsigned long   flags;
 
-        cfs_read_lock_irqsave(glock, flags);
+       read_lock_irqsave(glock, flags);
 
         peer = kiblnd_find_peer_locked(nid);
         if (peer != NULL) {
@@ -1162,7 +1162,7 @@ kiblnd_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
                 last_alive = peer->ibp_last_alive;
         }
 
-        cfs_read_unlock_irqrestore(glock, flags);
+       read_unlock_irqrestore(glock, flags);
 
         if (last_alive != 0)
                 *when = last_alive;
@@ -1317,22 +1317,23 @@ kiblnd_current_hdev(kib_dev_t *dev)
         unsigned long  flags;
         int            i = 0;
 
-        cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-        while (dev->ibd_failover) {
-                cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-                if (i++ % 50 == 0)
-                        CDEBUG(D_NET, "Wait for dev(%s) failover\n", dev->ibd_ifname);
-                cfs_schedule_timeout(cfs_time_seconds(1) / 100);
+       read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       while (dev->ibd_failover) {
+               read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+               if (i++ % 50 == 0)
+                       CDEBUG(D_NET, "%s: Wait for failover\n",
+                              dev->ibd_ifname);
+               cfs_schedule_timeout(cfs_time_seconds(1) / 100);
 
-                cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-        }
+               read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       }
 
-        kiblnd_hdev_addref_locked(dev->ibd_hdev);
-        hdev = dev->ibd_hdev;
+       kiblnd_hdev_addref_locked(dev->ibd_hdev);
+       hdev = dev->ibd_hdev;
 
-        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
-        return hdev;
+       return hdev;
 }
 
 static void
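
kiblnd_current_hdev() above polls for failover completion; the lock is dropped before the timed sleep and retaken before the flag is retested, so the thread never sleeps while holding a busy-waiting lock. A sketch, assuming cfs_schedule_timeout behaves like the stock schedule_timeout helpers:

    #include <linux/spinlock.h>
    #include <linux/sched.h>
    #include <linux/jiffies.h>

    static DEFINE_RWLOCK(my_gbl_lock);
    static int my_failover;         /* written under the write lock */

    static void my_wait_failover(void)
    {
            unsigned long flags;

            read_lock_irqsave(&my_gbl_lock, flags);
            while (my_failover) {
                    read_unlock_irqrestore(&my_gbl_lock, flags);
                    schedule_timeout_uninterruptible(HZ / 100); /* ~10 ms */
                    read_lock_irqsave(&my_gbl_lock, flags);
            }
            /* ... my_failover is 0, still under the read lock here ... */
            read_unlock_irqrestore(&my_gbl_lock, flags);
    }
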
@@ -1525,7 +1526,7 @@ kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, cfs_list_t *zombies)
        if (fps->fps_net == NULL) /* initialized? */
                return;
 
-        cfs_spin_lock(&fps->fps_lock);
+       spin_lock(&fps->fps_lock);
 
         while (!cfs_list_empty(&fps->fps_pool_list)) {
                 kib_fmr_pool_t *fpo = cfs_list_entry(fps->fps_pool_list.next,
@@ -1538,7 +1539,7 @@ kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, cfs_list_t *zombies)
                         cfs_list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
         }
 
-        cfs_spin_unlock(&fps->fps_lock);
+       spin_unlock(&fps->fps_lock);
 }
 
 static void
@@ -1563,7 +1564,7 @@ kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, kib_net_t *net,
        fps->fps_cpt = cpt;
        fps->fps_pool_size = pool_size;
        fps->fps_flush_trigger = flush_trigger;
-       cfs_spin_lock_init(&fps->fps_lock);
+       spin_lock_init(&fps->fps_lock);
        CFS_INIT_LIST_HEAD(&fps->fps_pool_list);
        CFS_INIT_LIST_HEAD(&fps->fps_failed_pool_list);
 
@@ -1605,7 +1606,7 @@ kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
         fmr->fmr_pool = NULL;
         fmr->fmr_pfmr = NULL;
 
-        cfs_spin_lock(&fps->fps_lock);
+       spin_lock(&fps->fps_lock);
         fpo->fpo_map_count --;  /* decref the pool */
 
         cfs_list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
@@ -1618,7 +1619,7 @@ kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
                         fps->fps_version ++;
                 }
         }
-        cfs_spin_unlock(&fps->fps_lock);
+       spin_unlock(&fps->fps_lock);
 
         if (!cfs_list_empty(&zombies))
                 kiblnd_destroy_fmr_pool_list(&zombies);
@@ -1634,12 +1635,12 @@ kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
         int                 rc;
 
  again:
-        cfs_spin_lock(&fps->fps_lock);
-        version = fps->fps_version;
-        cfs_list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
-                fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
-                fpo->fpo_map_count ++;
-                cfs_spin_unlock(&fps->fps_lock);
+       spin_lock(&fps->fps_lock);
+       version = fps->fps_version;
+       cfs_list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
+               fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+               fpo->fpo_map_count++;
+               spin_unlock(&fps->fps_lock);
 
                 pfmr = ib_fmr_pool_map_phys(fpo->fpo_fmr_pool,
                                             pages, npages, iov);
@@ -1649,51 +1650,51 @@ kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
                         return 0;
                 }
 
-                cfs_spin_lock(&fps->fps_lock);
-                fpo->fpo_map_count --;
-                if (PTR_ERR(pfmr) != -EAGAIN) {
-                        cfs_spin_unlock(&fps->fps_lock);
-                        return PTR_ERR(pfmr);
-                }
+               spin_lock(&fps->fps_lock);
+               fpo->fpo_map_count--;
+               if (PTR_ERR(pfmr) != -EAGAIN) {
+                       spin_unlock(&fps->fps_lock);
+                       return PTR_ERR(pfmr);
+               }
 
-                /* EAGAIN and ... */
-                if (version != fps->fps_version) {
-                        cfs_spin_unlock(&fps->fps_lock);
-                        goto again;
-                }
-        }
+               /* EAGAIN and ... */
+               if (version != fps->fps_version) {
+                       spin_unlock(&fps->fps_lock);
+                       goto again;
+               }
+       }
 
-        if (fps->fps_increasing) {
-                cfs_spin_unlock(&fps->fps_lock);
-                CDEBUG(D_NET, "Another thread is allocating new "
-                              "FMR pool, waiting for her to complete\n");
-                cfs_schedule();
-                goto again;
+       if (fps->fps_increasing) {
+               spin_unlock(&fps->fps_lock);
+               CDEBUG(D_NET, "Another thread is allocating new "
+                      "FMR pool, waiting for her to complete\n");
+               cfs_schedule();
+               goto again;
 
-        }
+       }
 
-        if (cfs_time_before(cfs_time_current(), fps->fps_next_retry)) {
-                /* someone failed recently */
-                cfs_spin_unlock(&fps->fps_lock);
-                return -EAGAIN;
-        }
+       if (cfs_time_before(cfs_time_current(), fps->fps_next_retry)) {
+               /* someone failed recently */
+               spin_unlock(&fps->fps_lock);
+               return -EAGAIN;
+       }
 
-        fps->fps_increasing = 1;
-        cfs_spin_unlock(&fps->fps_lock);
+       fps->fps_increasing = 1;
+       spin_unlock(&fps->fps_lock);
 
-        CDEBUG(D_NET, "Allocate new FMR pool\n");
-        rc = kiblnd_create_fmr_pool(fps, &fpo);
-        cfs_spin_lock(&fps->fps_lock);
-        fps->fps_increasing = 0;
-        if (rc == 0) {
-                fps->fps_version ++;
-                cfs_list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
-        } else {
-                fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
-        }
-        cfs_spin_unlock(&fps->fps_lock);
+       CDEBUG(D_NET, "Allocate new FMR pool\n");
+       rc = kiblnd_create_fmr_pool(fps, &fpo);
+       spin_lock(&fps->fps_lock);
+       fps->fps_increasing = 0;
+       if (rc == 0) {
+               fps->fps_version++;
+               cfs_list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
+       } else {
+               fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
+       }
+       spin_unlock(&fps->fps_lock);
 
-        goto again;
+       goto again;
 }
 
 static void
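
kiblnd_fmr_pool_map() cannot hold fps_lock across the blocking ib_fmr_pool_map_phys() call, so it snapshots fps_version under the lock, maps with the lock dropped, and on -EAGAIN retries only when the version is unchanged; a bumped version means another thread already grew the pool list. The core of that optimistic retry, reduced to hypothetical my_* names:

    #include <linux/spinlock.h>
    #include <linux/errno.h>

    static DEFINE_SPINLOCK(my_pool_lock);
    static int my_version;                  /* bumped when pools change */

    static int my_try_map(void)
    {
            return -EAGAIN;                 /* stand-in for the blocking
                                             * mapping attempt */
    }

    static int my_map(void)
    {
            int version, rc;
    again:
            spin_lock(&my_pool_lock);
            version = my_version;           /* snapshot under the lock */
            spin_unlock(&my_pool_lock);

            rc = my_try_map();              /* may block: lock not held */
            if (rc != -EAGAIN)
                    return rc;

            spin_lock(&my_pool_lock);
            if (version != my_version) {    /* pools grew meanwhile */
                    spin_unlock(&my_pool_lock);
                    goto again;
            }
            spin_unlock(&my_pool_lock);
            return -EAGAIN;                 /* genuinely exhausted */
    }

The fps_increasing flag in the hunks above serves the companion purpose: exactly one thread grows the pool with the lock dropped while the others yield and retry.
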
@@ -1737,7 +1738,7 @@ kiblnd_fail_poolset(kib_poolset_t *ps, cfs_list_t *zombies)
        if (ps->ps_net == NULL) /* initialized? */
                return;
 
-        cfs_spin_lock(&ps->ps_lock);
+       spin_lock(&ps->ps_lock);
         while (!cfs_list_empty(&ps->ps_pool_list)) {
                 kib_pool_t *po = cfs_list_entry(ps->ps_pool_list.next,
                                             kib_pool_t, po_list);
@@ -1748,7 +1749,7 @@ kiblnd_fail_poolset(kib_poolset_t *ps, cfs_list_t *zombies)
                 else
                         cfs_list_add(&po->po_list, &ps->ps_failed_pool_list);
         }
-        cfs_spin_unlock(&ps->ps_lock);
+       spin_unlock(&ps->ps_lock);
 }
 
 static void
@@ -1781,7 +1782,7 @@ kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
         ps->ps_node_fini    = nd_fini;
         ps->ps_pool_size    = size;
         strncpy(ps->ps_name, name, IBLND_POOL_NAME_LEN);
-        cfs_spin_lock_init(&ps->ps_lock);
+       spin_lock_init(&ps->ps_lock);
         CFS_INIT_LIST_HEAD(&ps->ps_pool_list);
         CFS_INIT_LIST_HEAD(&ps->ps_failed_pool_list);
 
@@ -1812,7 +1813,7 @@ kiblnd_pool_free_node(kib_pool_t *pool, cfs_list_t *node)
         kib_pool_t     *tmp;
         cfs_time_t      now = cfs_time_current();
 
-        cfs_spin_lock(&ps->ps_lock);
+       spin_lock(&ps->ps_lock);
 
         if (ps->ps_node_fini != NULL)
                 ps->ps_node_fini(pool, node);
@@ -1829,10 +1830,10 @@ kiblnd_pool_free_node(kib_pool_t *pool, cfs_list_t *node)
                 if (kiblnd_pool_is_idle(pool, now))
                         cfs_list_move(&pool->po_list, &zombies);
         }
-        cfs_spin_unlock(&ps->ps_lock);
+       spin_unlock(&ps->ps_lock);
 
-        if (!cfs_list_empty(&zombies))
-                kiblnd_destroy_pool_list(&zombies);
+       if (!cfs_list_empty(&zombies))
+               kiblnd_destroy_pool_list(&zombies);
 }
 
 cfs_list_t *
@@ -1843,7 +1844,7 @@ kiblnd_pool_alloc_node(kib_poolset_t *ps)
         int                    rc;
 
  again:
-        cfs_spin_lock(&ps->ps_lock);
+       spin_lock(&ps->ps_lock);
         cfs_list_for_each_entry(pool, &ps->ps_pool_list, po_list) {
                 if (cfs_list_empty(&pool->po_free_list))
                         continue;
@@ -1857,14 +1858,14 @@ kiblnd_pool_alloc_node(kib_poolset_t *ps)
                         /* still hold the lock */
                         ps->ps_node_init(pool, node);
                 }
-                cfs_spin_unlock(&ps->ps_lock);
-                return node;
-        }
+               spin_unlock(&ps->ps_lock);
+               return node;
+       }
 
-        /* no available tx pool and ... */
-        if (ps->ps_increasing) {
-                /* another thread is allocating a new pool */
-                cfs_spin_unlock(&ps->ps_lock);
+       /* no available tx pool and ... */
+       if (ps->ps_increasing) {
+               /* another thread is allocating a new pool */
+               spin_unlock(&ps->ps_lock);
                 CDEBUG(D_NET, "Another thread is allocating new "
                        "%s pool, waiting for her to complete\n",
                        ps->ps_name);
@@ -1872,20 +1873,20 @@ kiblnd_pool_alloc_node(kib_poolset_t *ps)
                 goto again;
         }
 
-        if (cfs_time_before(cfs_time_current(), ps->ps_next_retry)) {
-                /* someone failed recently */
-                cfs_spin_unlock(&ps->ps_lock);
-                return NULL;
-        }
+       if (cfs_time_before(cfs_time_current(), ps->ps_next_retry)) {
+               /* someone failed recently */
+               spin_unlock(&ps->ps_lock);
+               return NULL;
+       }
 
-        ps->ps_increasing = 1;
-        cfs_spin_unlock(&ps->ps_lock);
+       ps->ps_increasing = 1;
+       spin_unlock(&ps->ps_lock);
 
-        CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
+       CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
 
-        rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
+       rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
 
-        cfs_spin_lock(&ps->ps_lock);
+       spin_lock(&ps->ps_lock);
         ps->ps_increasing = 0;
         if (rc == 0) {
                 cfs_list_add_tail(&pool->po_list, &ps->ps_pool_list);
@@ -1894,9 +1895,9 @@ kiblnd_pool_alloc_node(kib_poolset_t *ps)
                 CERROR("Can't allocate new %s pool because out of memory\n",
                        ps->ps_name);
         }
-        cfs_spin_unlock(&ps->ps_lock);
+       spin_unlock(&ps->ps_lock);
 
-        goto again;
+       goto again;
 }
 
 void
@@ -2238,15 +2239,15 @@ kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
        int             rc;
        int             i;
 
-       cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        if (*kiblnd_tunables.kib_map_on_demand == 0 &&
            net->ibn_dev->ibd_hdev->ibh_nmrs == 1) {
-               cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+               read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
                                           flags);
                goto create_tx_pool;
        }
 
-       cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
        if (*kiblnd_tunables.kib_fmr_pool_size <
            *kiblnd_tunables.kib_ntx / 4) {
@@ -2618,13 +2619,13 @@ kiblnd_dev_failover(kib_dev_t *dev)
                  * because we can fail to create new listener.
                  * But we have to close it now, otherwise rdma_bind_addr
                  * will return EADDRINUSE... How crap! */
-                cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+               write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
-                cmid = dev->ibd_hdev->ibh_cmid;
-                /* make next schedule of kiblnd_dev_need_failover
-                 * will return 1 for me */
-                dev->ibd_hdev->ibh_cmid  = NULL;
-                cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+               cmid = dev->ibd_hdev->ibh_cmid;
+               /* make next schedule of kiblnd_dev_need_failover()
+                * return 1 for me */
+               dev->ibd_hdev->ibh_cmid  = NULL;
+               write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
                 rdma_destroy_id(cmid);
         }
@@ -2686,7 +2687,7 @@ kiblnd_dev_failover(kib_dev_t *dev)
                 goto out;
         }
 
-        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         old = dev->ibd_hdev;
         dev->ibd_hdev = hdev; /* take over the refcount */
@@ -2708,7 +2709,7 @@ kiblnd_dev_failover(kib_dev_t *dev)
                }
        }
 
-        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
  out:
         if (!cfs_list_empty(&zombie_tpo))
                 kiblnd_destroy_pool_list(&zombie_tpo);
@@ -2871,7 +2872,7 @@ void
 kiblnd_shutdown (lnet_ni_t *ni)
 {
         kib_net_t        *net = ni->ni_data;
-        cfs_rwlock_t     *g_lock = &kiblnd_data.kib_global_lock;
+       rwlock_t         *g_lock = &kiblnd_data.kib_global_lock;
         int               i;
         unsigned long     flags;
 
@@ -2883,9 +2884,9 @@ kiblnd_shutdown (lnet_ni_t *ni)
         CDEBUG(D_MALLOC, "before LND net cleanup: kmem %d\n",
                cfs_atomic_read(&libcfs_kmemory));
 
-        cfs_write_lock_irqsave(g_lock, flags);
-        net->ibn_shutdown = 1;
-        cfs_write_unlock_irqrestore(g_lock, flags);
+       write_lock_irqsave(g_lock, flags);
+       net->ibn_shutdown = 1;
+       write_unlock_irqrestore(g_lock, flags);
 
         switch (net->ibn_init) {
         default:
@@ -2908,11 +2909,11 @@ kiblnd_shutdown (lnet_ni_t *ni)
 
                kiblnd_net_fini_pools(net);
 
-                cfs_write_lock_irqsave(g_lock, flags);
-                LASSERT (net->ibn_dev->ibd_nnets > 0);
-                net->ibn_dev->ibd_nnets--;
-                cfs_list_del(&net->ibn_list);
-                cfs_write_unlock_irqrestore(g_lock, flags);
+               write_lock_irqsave(g_lock, flags);
+               LASSERT(net->ibn_dev->ibd_nnets > 0);
+               net->ibn_dev->ibd_nnets--;
+               cfs_list_del(&net->ibn_list);
+               write_unlock_irqrestore(g_lock, flags);
 
                 /* fall through */
 
@@ -2952,7 +2953,7 @@ kiblnd_base_startup(void)
         PORTAL_MODULE_USE;
         memset(&kiblnd_data, 0, sizeof(kiblnd_data)); /* zero pointers, flags etc */
 
-        cfs_rwlock_init(&kiblnd_data.kib_global_lock);
+       rwlock_init(&kiblnd_data.kib_global_lock);
 
         CFS_INIT_LIST_HEAD(&kiblnd_data.kib_devs);
         CFS_INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs);
@@ -2967,7 +2968,7 @@ kiblnd_base_startup(void)
         for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
                 CFS_INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
 
-        cfs_spin_lock_init(&kiblnd_data.kib_connd_lock);
+       spin_lock_init(&kiblnd_data.kib_connd_lock);
         CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
         CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
         cfs_waitq_init(&kiblnd_data.kib_connd_waitq);
@@ -2981,7 +2982,7 @@ kiblnd_base_startup(void)
        cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
                int     nthrs;
 
-               cfs_spin_lock_init(&sched->ibs_lock);
+               spin_lock_init(&sched->ibs_lock);
                CFS_INIT_LIST_HEAD(&sched->ibs_conns);
                cfs_waitq_init(&sched->ibs_waitq);
 
@@ -3204,10 +3205,10 @@ kiblnd_startup (lnet_ni_t *ni)
                 goto failed;
         }
 
-        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-        ibdev->ibd_nnets++;
-        cfs_list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
-        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       ibdev->ibd_nnets++;
+       cfs_list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
+       write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         net->ibn_init = IBLND_INIT_ALL;
 
index f3add0b..9913236 100644 (file)
@@ -265,7 +265,7 @@ struct kib_net;
 
 typedef struct kib_poolset
 {
-        cfs_spinlock_t          ps_lock;                /* serialize */
+       spinlock_t              ps_lock;                /* serialize */
         struct kib_net         *ps_net;                 /* network it belongs to */
         char                    ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
         cfs_list_t              ps_pool_list;           /* list of pools */
@@ -315,7 +315,7 @@ typedef struct kib_pmr_pool {
 
 typedef struct
 {
-        cfs_spinlock_t          fps_lock;               /* serialize */
+       spinlock_t              fps_lock;               /* serialize */
         struct kib_net         *fps_net;                /* IB network */
         cfs_list_t              fps_pool_list;          /* FMR pool list */
         cfs_list_t              fps_failed_pool_list;   /* FMR pool list */
@@ -369,7 +369,7 @@ typedef struct kib_net
 
 struct kib_sched_info {
        /* serialise */
-       cfs_spinlock_t          ibs_lock;
+       spinlock_t              ibs_lock;
        /* schedulers sleep here */
        cfs_waitq_t             ibs_waitq;
        /* conns to check for rx completions */
@@ -392,7 +392,7 @@ typedef struct
        cfs_waitq_t             kib_failover_waitq;
        cfs_atomic_t            kib_nthreads;   /* # live threads */
        /* stabilize net/dev/peer/conn ops */
-       cfs_rwlock_t            kib_global_lock;
+       rwlock_t                kib_global_lock;
        /* hash table of all my known peers */
        cfs_list_t              *kib_peers;
        /* size of kib_peers */
@@ -405,7 +405,7 @@ typedef struct
        cfs_list_t              kib_connd_zombies;
        /* connection daemon sleeps here */
        cfs_waitq_t             kib_connd_waitq;
-       cfs_spinlock_t          kib_connd_lock; /* serialise */
+       spinlock_t              kib_connd_lock; /* serialise */
        struct ib_qp_attr       kib_error_qpa;  /* QP->ERROR */
        /* percpt data for schedulers */
        struct kib_sched_info   **kib_scheds;
@@ -621,7 +621,7 @@ typedef struct kib_conn
         cfs_list_t           ibc_tx_queue_nocred;/* sends that don't need a credit */
         cfs_list_t           ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */
         cfs_list_t           ibc_active_txs;     /* active tx awaiting completion */
-        cfs_spinlock_t       ibc_lock;           /* serialise */
+       spinlock_t           ibc_lock;           /* serialise */
         kib_rx_t            *ibc_rxs;            /* the rx descs */
         kib_pages_t         *ibc_rx_pages;       /* premapped rx msg pages */
 
@@ -695,20 +695,20 @@ do {                                                            \
         cfs_atomic_inc(&(conn)->ibc_refcount);                  \
 } while (0)
 
-#define kiblnd_conn_decref(conn)                                               \
-do {                                                                           \
-        unsigned long   flags;                                                 \
-                                                                               \
-        CDEBUG(D_NET, "conn[%p] (%d)--\n",                                     \
-               (conn), cfs_atomic_read(&(conn)->ibc_refcount));                \
-        LASSERT_ATOMIC_POS(&(conn)->ibc_refcount);                             \
-        if (cfs_atomic_dec_and_test(&(conn)->ibc_refcount)) {                  \
-                cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);     \
-                cfs_list_add_tail(&(conn)->ibc_list,                           \
-                                  &kiblnd_data.kib_connd_zombies);             \
-                cfs_waitq_signal(&kiblnd_data.kib_connd_waitq);                \
-                cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
-        }                                                                      \
+#define kiblnd_conn_decref(conn)                                       \
+do {                                                                   \
+       unsigned long flags;                                            \
+                                                                       \
+       CDEBUG(D_NET, "conn[%p] (%d)--\n",                              \
+              (conn), cfs_atomic_read(&(conn)->ibc_refcount));         \
+       LASSERT_ATOMIC_POS(&(conn)->ibc_refcount);                      \
+       if (cfs_atomic_dec_and_test(&(conn)->ibc_refcount)) {           \
+               spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);  \
+               cfs_list_add_tail(&(conn)->ibc_list,                    \
+                                 &kiblnd_data.kib_connd_zombies);      \
+               cfs_waitq_signal(&kiblnd_data.kib_connd_waitq);         \
+               spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);\
+       }                                                               \
 } while (0)
 
 #define kiblnd_peer_addref(peer)                                \
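
The rewritten kiblnd_conn_decref() above is a final-put handoff: the last reference does not tear the connection down inline but queues it for the connd thread under kib_connd_lock, so the macro stays safe in atomic context. The same idea as a static inline with hypothetical names:

    #include <linux/spinlock.h>
    #include <linux/list.h>
    #include <linux/atomic.h>

    struct my_conn {
            atomic_t         c_ref;
            struct list_head c_list;
    };

    static DEFINE_SPINLOCK(my_zombie_lock);
    static LIST_HEAD(my_zombies);

    static inline void my_conn_decref(struct my_conn *c)
    {
            unsigned long flags;

            if (atomic_dec_and_test(&c->c_ref)) {
                    /* last put: hand the object to a reaper thread
                     * instead of freeing here, where sleeping (and
                     * heavyweight teardown) may be forbidden */
                    spin_lock_irqsave(&my_zombie_lock, flags);
                    list_add_tail(&c->c_list, &my_zombies);
                    spin_unlock_irqrestore(&my_zombie_lock, flags);
                    /* ... wake the reaper thread here ... */
            }
    }
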
index 2ebea68..76c844d 100644 (file)
@@ -132,10 +132,10 @@ kiblnd_drop_rx(kib_rx_t *rx)
        struct kib_sched_info   *sched  = conn->ibc_sched;
        unsigned long           flags;
 
-       cfs_spin_lock_irqsave(&sched->ibs_lock, flags);
+       spin_lock_irqsave(&sched->ibs_lock, flags);
        LASSERT(conn->ibc_nrx > 0);
        conn->ibc_nrx--;
-       cfs_spin_unlock_irqrestore(&sched->ibs_lock, flags);
+       spin_unlock_irqrestore(&sched->ibs_lock, flags);
 
        kiblnd_conn_decref(conn);
 }
@@ -196,15 +196,15 @@ kiblnd_post_rx (kib_rx_t *rx, int credit)
         if (credit == IBLND_POSTRX_NO_CREDIT)
                 return 0;
 
-        cfs_spin_lock(&conn->ibc_lock);
-        if (credit == IBLND_POSTRX_PEER_CREDIT)
-                conn->ibc_outstanding_credits++;
-        else
-                conn->ibc_reserved_credits++;
-        cfs_spin_unlock(&conn->ibc_lock);
+       spin_lock(&conn->ibc_lock);
+       if (credit == IBLND_POSTRX_PEER_CREDIT)
+               conn->ibc_outstanding_credits++;
+       else
+               conn->ibc_reserved_credits++;
+       spin_unlock(&conn->ibc_lock);
 
-        kiblnd_check_sends(conn);
-        return 0;
+       kiblnd_check_sends(conn);
+       return 0;
 }
 
 kib_tx_t *
@@ -235,15 +235,15 @@ kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
 void
 kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
 {
-        kib_tx_t    *tx;
-        lnet_ni_t   *ni = conn->ibc_peer->ibp_ni;
-        int          idle;
+       kib_tx_t    *tx;
+       lnet_ni_t   *ni = conn->ibc_peer->ibp_ni;
+       int          idle;
 
-        cfs_spin_lock(&conn->ibc_lock);
+       spin_lock(&conn->ibc_lock);
 
-        tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
-        if (tx == NULL) {
-                cfs_spin_unlock(&conn->ibc_lock);
+       tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
+       if (tx == NULL) {
+               spin_unlock(&conn->ibc_lock);
 
                 CWARN("Unmatched completion type %x cookie "LPX64" from %s\n",
                       txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
@@ -265,10 +265,10 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
         if (idle)
                 cfs_list_del(&tx->tx_list);
 
-        cfs_spin_unlock(&conn->ibc_lock);
+       spin_unlock(&conn->ibc_lock);
 
-        if (idle)
-                kiblnd_tx_done(ni, tx);
+       if (idle)
+               kiblnd_tx_done(ni, tx);
 }
 
 void
@@ -310,12 +310,12 @@ kiblnd_handle_rx (kib_rx_t *rx)
 
         if (credits != 0) {
                 /* Have I received credits that will let me send? */
-                cfs_spin_lock(&conn->ibc_lock);
+               spin_lock(&conn->ibc_lock);
 
-                if (conn->ibc_credits + credits >
-                    IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) {
-                        rc2 = conn->ibc_credits;
-                        cfs_spin_unlock(&conn->ibc_lock);
+               if (conn->ibc_credits + credits >
+                   IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) {
+                       rc2 = conn->ibc_credits;
+                       spin_unlock(&conn->ibc_lock);
 
                         CERROR("Bad credits from %s: %d + %d > %d\n",
                                libcfs_nid2str(conn->ibc_peer->ibp_nid),
@@ -334,7 +334,7 @@ kiblnd_handle_rx (kib_rx_t *rx)
                     !IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */
                         conn->ibc_outstanding_credits++;
 
-                cfs_spin_unlock(&conn->ibc_lock);
+               spin_unlock(&conn->ibc_lock);
                 kiblnd_check_sends(conn);
         }
 
@@ -386,12 +386,12 @@ kiblnd_handle_rx (kib_rx_t *rx)
         case IBLND_MSG_PUT_ACK:
                 post_credit = IBLND_POSTRX_RSRVD_CREDIT;
 
-                cfs_spin_lock(&conn->ibc_lock);
-                tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
-                                                   msg->ibm_u.putack.ibpam_src_cookie);
-                if (tx != NULL)
-                        cfs_list_del(&tx->tx_list);
-                cfs_spin_unlock(&conn->ibc_lock);
+               spin_lock(&conn->ibc_lock);
+               tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
+                                       msg->ibm_u.putack.ibpam_src_cookie);
+               if (tx != NULL)
+                       cfs_list_del(&tx->tx_list);
+               spin_unlock(&conn->ibc_lock);
 
                 if (tx == NULL) {
                         CERROR("Unmatched PUT_ACK from %s\n",
@@ -415,11 +415,11 @@ kiblnd_handle_rx (kib_rx_t *rx)
                         CERROR("Can't setup rdma for PUT to %s: %d\n",
                                libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2);
 
-                cfs_spin_lock(&conn->ibc_lock);
-                tx->tx_waiting = 0;             /* clear waiting and queue atomically */
-                kiblnd_queue_tx_locked(tx, conn);
-                cfs_spin_unlock(&conn->ibc_lock);
-                break;
+               spin_lock(&conn->ibc_lock);
+               tx->tx_waiting = 0;     /* clear waiting and queue atomically */
+               kiblnd_queue_tx_locked(tx, conn);
+               spin_unlock(&conn->ibc_lock);
+               break;
 
         case IBLND_MSG_PUT_DONE:
                 post_credit = IBLND_POSTRX_PEER_CREDIT;
@@ -500,17 +500,17 @@ kiblnd_rx_complete (kib_rx_t *rx, int status, int nob)
         /* racing with connection establishment/teardown! */
 
         if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
-                cfs_rwlock_t  *g_lock = &kiblnd_data.kib_global_lock;
-                unsigned long  flags;
-
-                cfs_write_lock_irqsave(g_lock, flags);
-                /* must check holding global lock to eliminate race */
-                if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
-                        cfs_list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
-                        cfs_write_unlock_irqrestore(g_lock, flags);
-                        return;
-                }
-                cfs_write_unlock_irqrestore(g_lock, flags);
+               rwlock_t  *g_lock = &kiblnd_data.kib_global_lock;
+               unsigned long  flags;
+
+               write_lock_irqsave(g_lock, flags);
+               /* must check holding global lock to eliminate race */
+               if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
+                       cfs_list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
+                       write_unlock_irqrestore(g_lock, flags);
+                       return;
+               }
+               write_unlock_irqrestore(g_lock, flags);
         }
         kiblnd_handle_rx(rx);
         return;
@@ -836,9 +836,9 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
                 /* OK to drop when posted enough NOOPs, since
                  * kiblnd_check_sends will queue NOOP again when
                  * posted NOOPs complete */
-                cfs_spin_unlock(&conn->ibc_lock);
-                kiblnd_tx_done(peer->ibp_ni, tx);
-                cfs_spin_lock(&conn->ibc_lock);
+               spin_unlock(&conn->ibc_lock);
+               kiblnd_tx_done(peer->ibp_ni, tx);
+               spin_lock(&conn->ibc_lock);
                 CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
                        libcfs_nid2str(peer->ibp_nid),
                        conn->ibc_noops_posted);
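
kiblnd_post_tx_locked() follows the _locked naming contract: it is entered and exited with ibc_lock held, though it may drop the lock internally, here around kiblnd_tx_done(). A tiny sketch of the convention:

    #include <linux/spinlock.h>
    #include <linux/errno.h>

    struct my_conn { spinlock_t c_lock; };
    struct my_tx { int t_status; };

    static void my_tx_done(struct my_tx *tx)
    {
            /* completion path: takes other locks, so c_lock must be free */
    }

    /* _locked suffix: called, and returns, with c->c_lock held. */
    static int my_post_tx_locked(struct my_conn *c, struct my_tx *tx)
    {
            spin_unlock(&c->c_lock);        /* shed the lock for the callout */
            my_tx_done(tx);
            spin_lock(&c->c_lock);          /* honour the contract on return */
            /* any state read before the unlock is stale at this point */
            return -EIO;
    }
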
@@ -896,7 +896,7 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
         if (done)
                 cfs_list_del(&tx->tx_list);
 
-        cfs_spin_unlock(&conn->ibc_lock);
+       spin_unlock(&conn->ibc_lock);
 
         if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
                 CERROR("Error %d posting transmit to %s\n",
@@ -910,9 +910,9 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
         if (done)
                 kiblnd_tx_done(peer->ibp_ni, tx);
 
-        cfs_spin_lock(&conn->ibc_lock);
+       spin_lock(&conn->ibc_lock);
 
-        return -EIO;
+       return -EIO;
 }
 
 void
@@ -929,7 +929,7 @@ kiblnd_check_sends (kib_conn_t *conn)
                 return;
         }
 
-        cfs_spin_lock(&conn->ibc_lock);
+       spin_lock(&conn->ibc_lock);
 
         LASSERT (conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver));
         LASSERT (!IBLND_OOB_CAPABLE(ver) ||
@@ -946,13 +946,13 @@ kiblnd_check_sends (kib_conn_t *conn)
         }
 
         if (kiblnd_need_noop(conn)) {
-                cfs_spin_unlock(&conn->ibc_lock);
+               spin_unlock(&conn->ibc_lock);
 
                tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
-                if (tx != NULL)
-                        kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);
+               if (tx != NULL)
+                       kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);
 
-                cfs_spin_lock(&conn->ibc_lock);
+               spin_lock(&conn->ibc_lock);
                 if (tx != NULL)
                         kiblnd_queue_tx_locked(tx, conn);
         }
@@ -982,9 +982,9 @@ kiblnd_check_sends (kib_conn_t *conn)
                         break;
         }
 
-        cfs_spin_unlock(&conn->ibc_lock);
+       spin_unlock(&conn->ibc_lock);
 
-        kiblnd_conn_decref(conn); /* ...until here */
+       kiblnd_conn_decref(conn); /* ...until here */
 }
 
 void
@@ -1009,7 +1009,7 @@ kiblnd_tx_complete (kib_tx_t *tx, int status)
                 kiblnd_peer_alive(conn->ibc_peer);
         }
 
-        cfs_spin_lock(&conn->ibc_lock);
+       spin_lock(&conn->ibc_lock);
 
         /* I could be racing with rdma completion.  Whoever makes 'tx' idle
          * gets to free it, which also drops its ref on 'conn'. */
@@ -1032,7 +1032,7 @@ kiblnd_tx_complete (kib_tx_t *tx, int status)
 
         kiblnd_conn_addref(conn);               /* 1 ref for me.... */
 
-        cfs_spin_unlock(&conn->ibc_lock);
+       spin_unlock(&conn->ibc_lock);
 
         if (idle)
                 kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx);
@@ -1217,11 +1217,11 @@ kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn)
 void
 kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn)
 {
-        cfs_spin_lock(&conn->ibc_lock);
-        kiblnd_queue_tx_locked(tx, conn);
-        cfs_spin_unlock(&conn->ibc_lock);
+       spin_lock(&conn->ibc_lock);
+       kiblnd_queue_tx_locked(tx, conn);
+       spin_unlock(&conn->ibc_lock);
 
-        kiblnd_check_sends(conn);
+       kiblnd_check_sends(conn);
 }
 
 static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
@@ -1337,7 +1337,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
         kib_peer_t        *peer;
         kib_peer_t        *peer2;
         kib_conn_t        *conn;
-        cfs_rwlock_t      *g_lock = &kiblnd_data.kib_global_lock;
+       rwlock_t          *g_lock = &kiblnd_data.kib_global_lock;
         unsigned long      flags;
         int                rc;
 
@@ -1349,7 +1349,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
 
         /* First time, just use a read lock since I expect to find my peer
          * connected */
-        cfs_read_lock_irqsave(g_lock, flags);
+       read_lock_irqsave(g_lock, flags);
 
         peer = kiblnd_find_peer_locked(nid);
         if (peer != NULL && !cfs_list_empty(&peer->ibp_conns)) {
@@ -1357,7 +1357,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
                 conn = kiblnd_get_conn_locked(peer);
                 kiblnd_conn_addref(conn); /* 1 ref for me... */
 
-                cfs_read_unlock_irqrestore(g_lock, flags);
+               read_unlock_irqrestore(g_lock, flags);
 
                 if (tx != NULL)
                         kiblnd_queue_tx(tx, conn);
@@ -1365,9 +1365,9 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
                 return;
         }
 
-        cfs_read_unlock(g_lock);
-        /* Re-try with a write lock */
-        cfs_write_lock(g_lock);
+       read_unlock(g_lock);
+       /* Re-try with a write lock */
+       write_lock(g_lock);
 
         peer = kiblnd_find_peer_locked(nid);
         if (peer != NULL) {
@@ -1378,12 +1378,12 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
                         if (tx != NULL)
                                 cfs_list_add_tail(&tx->tx_list,
                                                   &peer->ibp_tx_queue);
-                        cfs_write_unlock_irqrestore(g_lock, flags);
-                } else {
-                        conn = kiblnd_get_conn_locked(peer);
-                        kiblnd_conn_addref(conn); /* 1 ref for me... */
+                       write_unlock_irqrestore(g_lock, flags);
+               } else {
+                       conn = kiblnd_get_conn_locked(peer);
+                       kiblnd_conn_addref(conn); /* 1 ref for me... */
 
-                        cfs_write_unlock_irqrestore(g_lock, flags);
+                       write_unlock_irqrestore(g_lock, flags);
 
                         if (tx != NULL)
                                 kiblnd_queue_tx(tx, conn);
@@ -1392,7 +1392,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
                 return;
         }
 
-        cfs_write_unlock_irqrestore(g_lock, flags);
+       write_unlock_irqrestore(g_lock, flags);
 
         /* Allocate a peer ready to add to the peer table and retry */
         rc = kiblnd_create_peer(ni, &peer, nid);
@@ -1406,7 +1406,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
                 return;
         }
 
-        cfs_write_lock_irqsave(g_lock, flags);
+       write_lock_irqsave(g_lock, flags);
 
         peer2 = kiblnd_find_peer_locked(nid);
         if (peer2 != NULL) {
@@ -1417,12 +1417,12 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
                         if (tx != NULL)
                                 cfs_list_add_tail(&tx->tx_list,
                                                   &peer2->ibp_tx_queue);
-                        cfs_write_unlock_irqrestore(g_lock, flags);
-                } else {
-                        conn = kiblnd_get_conn_locked(peer2);
-                        kiblnd_conn_addref(conn); /* 1 ref for me... */
+                       write_unlock_irqrestore(g_lock, flags);
+               } else {
+                       conn = kiblnd_get_conn_locked(peer2);
+                       kiblnd_conn_addref(conn); /* 1 ref for me... */
 
-                        cfs_write_unlock_irqrestore(g_lock, flags);
+                       write_unlock_irqrestore(g_lock, flags);
 
                         if (tx != NULL)
                                 kiblnd_queue_tx(tx, conn);
@@ -1446,7 +1446,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
         kiblnd_peer_addref(peer);
         cfs_list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
 
-        cfs_write_unlock_irqrestore(g_lock, flags);
+       write_unlock_irqrestore(g_lock, flags);
 
         kiblnd_connect_peer(peer);
         kiblnd_peer_decref(peer);
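
kiblnd_launch_tx() above keeps the classic optimistic rwlock pattern across the conversion: look up under the read lock first (the common case), then retry under the write lock with a re-check, since another thread may have inserted the peer in between.  (The function even switches from read_unlock() straight to write_lock() with interrupts still disabled; the sketch below uses the simpler save/restore form.)  A condensed sketch; table_find/table_insert/item_alloc/item_free are hypothetical stand-ins for the peer-table helpers:

	#include <linux/spinlock.h>

	struct item;                                    /* opaque here */
	extern struct item *table_find(int key);        /* hypothetical */
	extern void table_insert(struct item *it);      /* hypothetical */
	extern struct item *item_alloc(int key);        /* hypothetical */
	extern void item_free(struct item *it);         /* hypothetical */

	static DEFINE_RWLOCK(demo_glock);  /* stands in for kib_global_lock */

	static struct item *demo_lookup_or_add(int key)
	{
		unsigned long flags;
		struct item *it;
		struct item *old;

		read_lock_irqsave(&demo_glock, flags);     /* fast path */
		it = table_find(key);
		read_unlock_irqrestore(&demo_glock, flags);
		if (it != NULL)
			return it;

		it = item_alloc(key);          /* allocate outside lock */

		write_lock_irqsave(&demo_glock, flags);
		/* re-check: someone may have added 'key' since the
		 * read lock was dropped */
		old = table_find(key);
		if (old != NULL) {
			write_unlock_irqrestore(&demo_glock, flags);
			item_free(it);         /* lost the race */
			return old;
		}
		table_insert(it);
		write_unlock_irqrestore(&demo_glock, flags);
		return it;
	}
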
@@ -1831,7 +1831,7 @@ kiblnd_peer_notify (kib_peer_t *peer)
         cfs_time_t    last_alive = 0;
         unsigned long flags;
 
-        cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         if (cfs_list_empty(&peer->ibp_conns) &&
             peer->ibp_accepting == 0 &&
@@ -1843,7 +1843,7 @@ kiblnd_peer_notify (kib_peer_t *peer)
                 last_alive = peer->ibp_last_alive;
         }
 
-        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         if (error != 0)
                 lnet_notify(peer->ibp_ni,
@@ -1910,48 +1910,47 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
                 cfs_waitq_signal(&kiblnd_data.kib_failover_waitq);
         }
 
-        cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+       spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
 
-        cfs_list_add_tail (&conn->ibc_list, &kiblnd_data.kib_connd_conns);
-        cfs_waitq_signal (&kiblnd_data.kib_connd_waitq);
+       cfs_list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
+       cfs_waitq_signal(&kiblnd_data.kib_connd_waitq);
 
-        cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+       spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
 }
 
 void
-kiblnd_close_conn (kib_conn_t *conn, int error)
+kiblnd_close_conn(kib_conn_t *conn, int error)
 {
-        unsigned long flags;
+       unsigned long flags;
 
-        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
-        kiblnd_close_conn_locked(conn, error);
+       kiblnd_close_conn_locked(conn, error);
 
-        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 }
 
 void
 kiblnd_handle_early_rxs(kib_conn_t *conn)
 {
-        unsigned long    flags;
-        kib_rx_t        *rx;
+       unsigned long    flags;
+       kib_rx_t        *rx;
 
-        LASSERT (!cfs_in_interrupt());
-        LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+       LASSERT(!cfs_in_interrupt());
+       LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
 
-        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-        while (!cfs_list_empty(&conn->ibc_early_rxs)) {
-                rx = cfs_list_entry(conn->ibc_early_rxs.next,
-                                kib_rx_t, rx_list);
-                cfs_list_del(&rx->rx_list);
-                cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
-                                            flags);
+       write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       while (!cfs_list_empty(&conn->ibc_early_rxs)) {
+               rx = cfs_list_entry(conn->ibc_early_rxs.next,
+                                   kib_rx_t, rx_list);
+               cfs_list_del(&rx->rx_list);
+               write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
-                kiblnd_handle_rx(rx);
+               kiblnd_handle_rx(rx);
 
-                cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+               write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
         }
-        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 }
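
kiblnd_handle_early_rxs() above is the standard drain loop over a locked list: detach one entry, drop the lock so the handler can block or take other locks, then retake the lock and re-test the list from scratch.  The same shape with a plain spinlock; work_handle() is a hypothetical stand-in for kiblnd_handle_rx():

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct demo_work {
		struct list_head wl_list;
	};
	extern void work_handle(struct demo_work *w); /* hypothetical; may block */

	static DEFINE_SPINLOCK(demo_qlock);
	static LIST_HEAD(demo_queue);

	static void demo_drain(void)
	{
		struct demo_work *w;
		unsigned long flags;

		spin_lock_irqsave(&demo_qlock, flags);
		while (!list_empty(&demo_queue)) {
			w = list_entry(demo_queue.next, struct demo_work,
				       wl_list);
			list_del(&w->wl_list);
			spin_unlock_irqrestore(&demo_qlock, flags);

			work_handle(w);      /* lock dropped: may block */

			spin_lock_irqsave(&demo_qlock, flags);
		}
		spin_unlock_irqrestore(&demo_qlock, flags);
	}
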
 
 void
@@ -1962,7 +1961,7 @@ kiblnd_abort_txs(kib_conn_t *conn, cfs_list_t *txs)
         cfs_list_t          *nxt;
         kib_tx_t            *tx;
 
-        cfs_spin_lock(&conn->ibc_lock);
+       spin_lock(&conn->ibc_lock);
 
         cfs_list_for_each_safe (tmp, nxt, txs) {
                 tx = cfs_list_entry (tmp, kib_tx_t, tx_list);
@@ -1985,10 +1984,9 @@ kiblnd_abort_txs(kib_conn_t *conn, cfs_list_t *txs)
                 }
         }
 
-        cfs_spin_unlock(&conn->ibc_lock);
+       spin_unlock(&conn->ibc_lock);
 
-        kiblnd_txlist_done(conn->ibc_peer->ibp_ni,
-                           &zombies, -ECONNABORTED);
+       kiblnd_txlist_done(conn->ibc_peer->ibp_ni, &zombies, -ECONNABORTED);
 }
 
 void
@@ -2025,7 +2023,7 @@ kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error)
         LASSERT (error != 0);
         LASSERT (!cfs_in_interrupt());
 
-        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         if (active) {
                 LASSERT (peer->ibp_connecting > 0);
@@ -2038,7 +2036,7 @@ kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error)
         if (peer->ibp_connecting != 0 ||
             peer->ibp_accepting != 0) {
                 /* another connection attempt under way... */
-                cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+               write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
                                             flags);
                 return;
         }
@@ -2057,7 +2055,7 @@ kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error)
                 LASSERT (cfs_list_empty(&peer->ibp_tx_queue));
         }
 
-        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         kiblnd_peer_notify(peer);
 
@@ -2102,7 +2100,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
         }
 
         /* connection established */
-        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         conn->ibc_last_send = jiffies;
         kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
@@ -2140,30 +2138,29 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
 
                 /* start to shut down connection */
                 kiblnd_close_conn_locked(conn, -ECONNABORTED);
-                cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
-                                            flags);
+               write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
-                kiblnd_txlist_done(ni, &txs, -ECONNABORTED);
+               kiblnd_txlist_done(ni, &txs, -ECONNABORTED);
 
-                return;
-        }
+               return;
+       }
 
-        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
-        /* Schedule blocked txs */
-        cfs_spin_lock (&conn->ibc_lock);
-        while (!cfs_list_empty (&txs)) {
-                tx = cfs_list_entry (txs.next, kib_tx_t, tx_list);
-                cfs_list_del(&tx->tx_list);
+       /* Schedule blocked txs */
+       spin_lock(&conn->ibc_lock);
+       while (!cfs_list_empty(&txs)) {
+               tx = cfs_list_entry(txs.next, kib_tx_t, tx_list);
+               cfs_list_del(&tx->tx_list);
 
-                kiblnd_queue_tx_locked(tx, conn);
-        }
-        cfs_spin_unlock (&conn->ibc_lock);
+               kiblnd_queue_tx_locked(tx, conn);
+       }
+       spin_unlock(&conn->ibc_lock);
 
-        kiblnd_check_sends(conn);
+       kiblnd_check_sends(conn);
 
-        /* schedule blocked rxs */
-        kiblnd_handle_early_rxs(conn);
+       /* schedule blocked rxs */
+       kiblnd_handle_early_rxs(conn);
 }
 
 void
@@ -2180,7 +2177,7 @@ kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
 int
 kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
 {
-        cfs_rwlock_t          *g_lock = &kiblnd_data.kib_global_lock;
+       rwlock_t                *g_lock = &kiblnd_data.kib_global_lock;
         kib_msg_t             *reqmsg = priv;
         kib_msg_t             *ackmsg;
         kib_dev_t             *ibdev;
@@ -2326,7 +2323,7 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
                 goto failed;
         }
 
-        cfs_write_lock_irqsave(g_lock, flags);
+       write_lock_irqsave(g_lock, flags);
 
         peer2 = kiblnd_find_peer_locked(nid);
         if (peer2 != NULL) {
@@ -2339,7 +2336,7 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
                 if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
                     peer2->ibp_version     != version) {
                         kiblnd_close_peer_conns_locked(peer2, -ESTALE);
-                        cfs_write_unlock_irqrestore(g_lock, flags);
+                       write_unlock_irqrestore(g_lock, flags);
 
                         CWARN("Conn stale %s [old ver: %x, new ver: %x]\n",
                               libcfs_nid2str(nid), peer2->ibp_version, version);
@@ -2352,7 +2349,7 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
                 /* tie-break connection race in favour of the higher NID */
                 if (peer2->ibp_connecting != 0 &&
                     nid < ni->ni_nid) {
-                        cfs_write_unlock_irqrestore(g_lock, flags);
+                       write_unlock_irqrestore(g_lock, flags);
 
                         CWARN("Conn race %s\n", libcfs_nid2str(peer2->ibp_nid));
 
@@ -2364,7 +2361,7 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
                 peer2->ibp_accepting++;
                 kiblnd_peer_addref(peer2);
 
-                cfs_write_unlock_irqrestore(g_lock, flags);
+               write_unlock_irqrestore(g_lock, flags);
                 kiblnd_peer_decref(peer);
                 peer = peer2;
         } else {
@@ -2383,7 +2380,7 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
                 kiblnd_peer_addref(peer);
                 cfs_list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
 
-                cfs_write_unlock_irqrestore(g_lock, flags);
+               write_unlock_irqrestore(g_lock, flags);
         }
 
         conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version);
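
The "Conn race" branch above resolves crossing connection attempts deterministically: both nodes evaluate the same comparison, so exactly one side rejects the passive request and the higher NID's active connect wins.  The rule reduced to a sketch (demo_ names are illustrative):

	#include <linux/types.h>

	/* Return nonzero if an incoming (passive) request from
	 * 'remote_nid' should be rejected because our own active
	 * connect to that peer is in flight and we hold the higher NID. */
	static int demo_reject_passive(__u64 local_nid, __u64 remote_nid,
				       int active_connect_in_flight)
	{
		return active_connect_in_flight && remote_nid < local_nid;
	}
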
@@ -2463,7 +2460,7 @@ kiblnd_reconnect (kib_conn_t *conn, int version,
         LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
         LASSERT (peer->ibp_connecting > 0);     /* 'conn' at least */
 
-        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         /* retry connection if it's still needed and no other connection
          * attempts (active or passive) are in progress
@@ -2481,7 +2478,7 @@ kiblnd_reconnect (kib_conn_t *conn, int version,
                 peer->ibp_incarnation = incarnation;
         }
 
-        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         if (!retry)
                 return;
@@ -2713,13 +2710,13 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
                 goto failed;
         }
 
-        cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-        if (msg->ibm_dstnid == ni->ni_nid &&
-            msg->ibm_dststamp == net->ibn_incarnation)
-                rc = 0;
-        else
-                rc = -ESTALE;
-        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       if (msg->ibm_dstnid == ni->ni_nid &&
+           msg->ibm_dststamp == net->ibn_incarnation)
+               rc = 0;
+       else
+               rc = -ESTALE;
+       read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         if (rc != 0) {
                 CERROR("Bad connection reply from %s, rc = %d, "
@@ -2761,12 +2758,13 @@ kiblnd_active_connect (struct rdma_cm_id *cmid)
         unsigned long            flags;
         int                      rc;
 
-        cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
-        incarnation = peer->ibp_incarnation;
-        version     = (peer->ibp_version == 0) ? IBLND_MSG_VERSION : peer->ibp_version;
+       incarnation = peer->ibp_incarnation;
+       version     = (peer->ibp_version == 0) ? IBLND_MSG_VERSION :
+                                                peer->ibp_version;
 
-        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version);
         if (conn == NULL) {
@@ -3038,7 +3036,7 @@ kiblnd_check_conns (int idx)
         /* NB. We expect to have a look at all the peers and not find any
          * RDMAs to time out, so we just use a shared lock while we
          * take a look... */
-        cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
         cfs_list_for_each (ptmp, peers) {
                 peer = cfs_list_entry (ptmp, kib_peer_t, ibp_list);
@@ -3051,12 +3049,12 @@ kiblnd_check_conns (int idx)
 
                         LASSERT (conn->ibc_state == IBLND_CONN_ESTABLISHED);
 
-                        cfs_spin_lock(&conn->ibc_lock);
+                       spin_lock(&conn->ibc_lock);
 
-                        sendnoop = kiblnd_need_noop(conn);
-                        timedout = kiblnd_conn_timed_out_locked(conn);
-                        if (!sendnoop && !timedout) {
-                                cfs_spin_unlock(&conn->ibc_lock);
+                       sendnoop = kiblnd_need_noop(conn);
+                       timedout = kiblnd_conn_timed_out_locked(conn);
+                       if (!sendnoop && !timedout) {
+                               spin_unlock(&conn->ibc_lock);
                                 continue;
                         }
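
kiblnd_check_conns() scans the peer table under the shared (read) lock, taking only the per-connection spinlock exclusively; a connection that needs attention gets a reference while still locked, so it remains valid after both locks are dropped.  The shape of that scan, sketched with hypothetical demo_ helpers:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct demo_conn {
		struct list_head dc_list;
		struct list_head dc_todo;
	};
	extern int  demo_needs_attention(struct demo_conn *c); /* hypothetical */
	extern void demo_conn_addref(struct demo_conn *c);     /* hypothetical */

	static DEFINE_RWLOCK(demo_glock);

	static void demo_scan(struct list_head *conns, struct list_head *todo)
	{
		struct demo_conn *c;
		unsigned long flags;

		read_lock_irqsave(&demo_glock, flags);
		list_for_each_entry(c, conns, dc_list) {
			if (!demo_needs_attention(c))
				continue;
			/* reference taken while the lock pins 'c', so it
			 * can be acted on after the lock is dropped */
			demo_conn_addref(c);
			list_add_tail(&c->dc_todo, todo);
		}
		read_unlock_irqrestore(&demo_glock, flags);
		/* close / checksend the collected conns here, unlocked */
	}
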
 
@@ -3077,11 +3075,11 @@ kiblnd_check_conns (int idx)
                         /* +ref for 'closes' or 'checksends' */
                         kiblnd_conn_addref(conn);
 
-                        cfs_spin_unlock(&conn->ibc_lock);
-                }
-        }
+                       spin_unlock(&conn->ibc_lock);
+               }
+       }
 
-        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+       read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 
         /* Handle timeout by closing the whole
          * connection. We can only be sure RDMA activity
@@ -3137,7 +3135,7 @@ kiblnd_connd (void *arg)
         cfs_waitlink_init (&wait);
         kiblnd_data.kib_connd = current;
 
-        cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+       spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
 
         while (!kiblnd_data.kib_shutdown) {
 
@@ -3149,30 +3147,28 @@ kiblnd_connd (void *arg)
                                               kib_conn_t, ibc_list);
                         cfs_list_del(&conn->ibc_list);
 
-                        cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock,
-                                                   flags);
-                        dropped_lock = 1;
+                       spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
+                                              flags);
+                       dropped_lock = 1;
 
-                        kiblnd_destroy_conn(conn);
+                       kiblnd_destroy_conn(conn);
 
-                        cfs_spin_lock_irqsave (&kiblnd_data.kib_connd_lock,
-                                               flags);
-                }
+                       spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+               }
 
-                if (!cfs_list_empty (&kiblnd_data.kib_connd_conns)) {
-                        conn = cfs_list_entry (kiblnd_data.kib_connd_conns.next,
-                                               kib_conn_t, ibc_list);
-                        cfs_list_del(&conn->ibc_list);
+               if (!cfs_list_empty(&kiblnd_data.kib_connd_conns)) {
+                       conn = cfs_list_entry(kiblnd_data.kib_connd_conns.next,
+                                             kib_conn_t, ibc_list);
+                       cfs_list_del(&conn->ibc_list);
 
-                        cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock,
-                                                    flags);
-                        dropped_lock = 1;
+                       spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
+                                              flags);
+                       dropped_lock = 1;
 
-                        kiblnd_disconnect_conn(conn);
-                        kiblnd_conn_decref(conn);
+                       kiblnd_disconnect_conn(conn);
+                       kiblnd_conn_decref(conn);
 
-                        cfs_spin_lock_irqsave (&kiblnd_data.kib_connd_lock,
-                                               flags);
+                       spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
                 }
 
                 /* careful with the jiffy wrap... */
@@ -3182,7 +3178,7 @@ kiblnd_connd (void *arg)
                         const int p = 1;
                         int       chunk = kiblnd_data.kib_peer_hash_size;
 
-                        cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+                       spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
                         dropped_lock = 1;
 
                         /* Time to check for RDMA timeouts on a few more
@@ -3206,29 +3202,28 @@ kiblnd_connd (void *arg)
                         }
 
                         deadline += p * CFS_HZ;
-                        cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock,
-                                              flags);
-                }
+                       spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+               }
 
-                if (dropped_lock)
-                        continue;
+               if (dropped_lock)
+                       continue;
 
-                /* Nothing to do for 'timeout'  */
-                cfs_set_current_state (CFS_TASK_INTERRUPTIBLE);
-                cfs_waitq_add (&kiblnd_data.kib_connd_waitq, &wait);
-                cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
+               /* Nothing to do for 'timeout'  */
+               cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+               cfs_waitq_add(&kiblnd_data.kib_connd_waitq, &wait);
+               spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
 
-                cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
+               cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
 
-                cfs_set_current_state (CFS_TASK_RUNNING);
-                cfs_waitq_del (&kiblnd_data.kib_connd_waitq, &wait);
-                cfs_spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
-        }
+               cfs_set_current_state(CFS_TASK_RUNNING);
+               cfs_waitq_del(&kiblnd_data.kib_connd_waitq, &wait);
+               spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+       }
 
-        cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
+       spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
 
-        kiblnd_thread_fini();
-        return (0);
+       kiblnd_thread_fini();
+       return 0;
 }
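
The sleep at the bottom of kiblnd_connd() (still behind the cfs_waitq_* wrappers, which this patch does not convert) is the usual queue-then-unlock-then-sleep sequence that cannot lose a wake-up.  In the plain Linux API it would read roughly as follows; all demo_ names are illustrative:

	#include <linux/sched.h>
	#include <linux/spinlock.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
	static DEFINE_SPINLOCK(demo_lock);
	static int demo_have_work;        /* written under demo_lock */

	static void demo_wait(long timeout)
	{
		unsigned long flags;
		DEFINE_WAIT(wait);

		spin_lock_irqsave(&demo_lock, flags);
		while (!demo_have_work) {
			/* queue before unlocking: a wake-up arriving
			 * between unlock and schedule() is then seen */
			prepare_to_wait(&demo_waitq, &wait,
					TASK_INTERRUPTIBLE);
			spin_unlock_irqrestore(&demo_lock, flags);

			schedule_timeout(timeout);

			finish_wait(&demo_waitq, &wait);
			spin_lock_irqsave(&demo_lock, flags);
		}
		spin_unlock_irqrestore(&demo_lock, flags);
	}
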
 
 void
@@ -3292,7 +3287,7 @@ kiblnd_cq_completion(struct ib_cq *cq, void *arg)
 
        LASSERT(cq == conn->ibc_cq);
 
-       cfs_spin_lock_irqsave(&sched->ibs_lock, flags);
+       spin_lock_irqsave(&sched->ibs_lock, flags);
 
        conn->ibc_ready = 1;
 
@@ -3307,7 +3302,7 @@ kiblnd_cq_completion(struct ib_cq *cq, void *arg)
                        cfs_waitq_signal(&sched->ibs_waitq);
        }
 
-       cfs_spin_unlock_irqrestore(&sched->ibs_lock, flags);
+       spin_unlock_irqrestore(&sched->ibs_lock, flags);
 }
 
 void
@@ -3351,16 +3346,16 @@ kiblnd_scheduler(void *arg)
                      "performance\n", name, sched->ibs_cpt);
        }
 
-       cfs_spin_lock_irqsave(&sched->ibs_lock, flags);
+       spin_lock_irqsave(&sched->ibs_lock, flags);
 
        while (!kiblnd_data.kib_shutdown) {
                if (busy_loops++ >= IBLND_RESCHED) {
-                       cfs_spin_unlock_irqrestore(&sched->ibs_lock, flags);
+                       spin_unlock_irqrestore(&sched->ibs_lock, flags);
 
                        cfs_cond_resched();
                        busy_loops = 0;
 
-                       cfs_spin_lock_irqsave(&sched->ibs_lock, flags);
+                       spin_lock_irqsave(&sched->ibs_lock, flags);
                }
 
                did_something = 0;
@@ -3373,7 +3368,7 @@ kiblnd_scheduler(void *arg)
                        cfs_list_del(&conn->ibc_sched_list);
                        conn->ibc_ready = 0;
 
-                       cfs_spin_unlock_irqrestore(&sched->ibs_lock, flags);
+                       spin_unlock_irqrestore(&sched->ibs_lock, flags);
 
                         rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
                         if (rc == 0) {
@@ -3385,7 +3380,7 @@ kiblnd_scheduler(void *arg)
                                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
                                         kiblnd_close_conn(conn, -EIO);
                                         kiblnd_conn_decref(conn);
-                                       cfs_spin_lock_irqsave(&sched->ibs_lock,
+                                       spin_lock_irqsave(&sched->ibs_lock,
                                                              flags);
                                        continue;
                                }
@@ -3400,11 +3395,11 @@ kiblnd_scheduler(void *arg)
                                      rc);
                                kiblnd_close_conn(conn, -EIO);
                                kiblnd_conn_decref(conn);
-                               cfs_spin_lock_irqsave(&sched->ibs_lock, flags);
+                               spin_lock_irqsave(&sched->ibs_lock, flags);
                                continue;
                        }
 
-                       cfs_spin_lock_irqsave(&sched->ibs_lock, flags);
+                       spin_lock_irqsave(&sched->ibs_lock, flags);
 
                        if (rc != 0 || conn->ibc_ready) {
                                /* There may be another completion waiting; get
@@ -3421,11 +3416,10 @@ kiblnd_scheduler(void *arg)
                        }
 
                        if (rc != 0) {
-                               cfs_spin_unlock_irqrestore(&sched->ibs_lock,
-                                                          flags);
+                               spin_unlock_irqrestore(&sched->ibs_lock, flags);
                                kiblnd_complete(&wc);
 
-                               cfs_spin_lock_irqsave(&sched->ibs_lock, flags);
+                               spin_lock_irqsave(&sched->ibs_lock, flags);
                         }
 
                         kiblnd_conn_decref(conn); /* ...drop my ref from above */
@@ -3437,17 +3431,17 @@ kiblnd_scheduler(void *arg)
 
                cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
                cfs_waitq_add_exclusive(&sched->ibs_waitq, &wait);
-               cfs_spin_unlock_irqrestore(&sched->ibs_lock, flags);
+               spin_unlock_irqrestore(&sched->ibs_lock, flags);
 
                cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
                busy_loops = 0;
 
                cfs_waitq_del(&sched->ibs_waitq, &wait);
                cfs_set_current_state(CFS_TASK_RUNNING);
-               cfs_spin_lock_irqsave(&sched->ibs_lock, flags);
+               spin_lock_irqsave(&sched->ibs_lock, flags);
        }
 
-       cfs_spin_unlock_irqrestore(&sched->ibs_lock, flags);
+       spin_unlock_irqrestore(&sched->ibs_lock, flags);
 
        kiblnd_thread_fini();
        return 0;
@@ -3456,7 +3450,7 @@ kiblnd_scheduler(void *arg)
 int
 kiblnd_failover_thread(void *arg)
 {
-        cfs_rwlock_t      *glock = &kiblnd_data.kib_global_lock;
+       rwlock_t                *glock = &kiblnd_data.kib_global_lock;
         kib_dev_t         *dev;
         cfs_waitlink_t     wait;
         unsigned long      flags;
@@ -3468,7 +3462,7 @@ kiblnd_failover_thread(void *arg)
         cfs_block_allsigs ();
 
         cfs_waitlink_init(&wait);
-        cfs_write_lock_irqsave(glock, flags);
+       write_lock_irqsave(glock, flags);
 
         while (!kiblnd_data.kib_shutdown) {
                 int     do_failover = 0;
@@ -3486,11 +3480,11 @@ kiblnd_failover_thread(void *arg)
                 if (do_failover) {
                         cfs_list_del_init(&dev->ibd_fail_list);
                         dev->ibd_failover = 1;
-                        cfs_write_unlock_irqrestore(glock, flags);
+                       write_unlock_irqrestore(glock, flags);
 
-                        rc = kiblnd_dev_failover(dev);
+                       rc = kiblnd_dev_failover(dev);
 
-                        cfs_write_lock_irqsave(glock, flags);
+                       write_lock_irqsave(glock, flags);
 
                         LASSERT (dev->ibd_failover);
                         dev->ibd_failover = 0;
@@ -3515,13 +3509,13 @@ kiblnd_failover_thread(void *arg)
 
                 cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
                 cfs_waitq_add(&kiblnd_data.kib_failover_waitq, &wait);
-                cfs_write_unlock_irqrestore(glock, flags);
+               write_unlock_irqrestore(glock, flags);
 
-                rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
-                                                   cfs_time_seconds(1));
-                cfs_set_current_state(CFS_TASK_RUNNING);
-                cfs_waitq_del(&kiblnd_data.kib_failover_waitq, &wait);
-                cfs_write_lock_irqsave(glock, flags);
+               rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
+                                                  cfs_time_seconds(1));
+               cfs_set_current_state(CFS_TASK_RUNNING);
+               cfs_waitq_del(&kiblnd_data.kib_failover_waitq, &wait);
+               write_lock_irqsave(glock, flags);
 
                 if (!long_sleep || rc != 0)
                         continue;
@@ -3538,7 +3532,7 @@ kiblnd_failover_thread(void *arg)
                 }
         }
 
-        cfs_write_unlock_irqrestore(glock, flags);
+       write_unlock_irqrestore(glock, flags);
 
         kiblnd_thread_fini();
         return 0;
index 42d82f7..723ec7c 100644 (file)
@@ -59,12 +59,12 @@ kptllnd_ptlid2str(ptl_process_id_t id)
 
         unsigned long  flags;
         char          *str;
-        
-        cfs_spin_lock_irqsave(&kptllnd_data.kptl_ptlid2str_lock, flags);
+
+       spin_lock_irqsave(&kptllnd_data.kptl_ptlid2str_lock, flags);
         str = strs[idx++];
         if (idx >= sizeof(strs)/sizeof(strs[0]))
                 idx = 0;
-        cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_ptlid2str_lock, flags);
+       spin_unlock_irqrestore(&kptllnd_data.kptl_ptlid2str_lock, flags);
 
         snprintf(str, sizeof(strs[0]), FMT_PTLID, id.pid, id.nid);
         return str;
@@ -491,10 +491,10 @@ kptllnd_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
         if (kptllnd_find_target(net, id, &peer) != 0)
                 return;
 
-        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+       spin_lock_irqsave(&peer->peer_lock, flags);
         if (peer->peer_last_alive != 0)
                 *when = peer->peer_last_alive;
-        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+       spin_unlock_irqrestore(&peer->peer_lock, flags);
         kptllnd_peer_decref(peer);
         return;
 }
@@ -507,9 +507,9 @@ kptllnd_base_shutdown (void)
         unsigned long     flags;
         lnet_process_id_t process_id;
 
-        cfs_read_lock(&kptllnd_data.kptl_net_rw_lock);
+       read_lock(&kptllnd_data.kptl_net_rw_lock);
         LASSERT (cfs_list_empty(&kptllnd_data.kptl_nets));
-        cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
+       read_unlock(&kptllnd_data.kptl_net_rw_lock);
 
         switch (kptllnd_data.kptl_init) {
         default:
@@ -523,11 +523,11 @@ kptllnd_base_shutdown (void)
                 LASSERT (cfs_list_empty(&kptllnd_data.kptl_sched_rxbq));
 
                 /* lock to interleave cleanly with peer birth/death */
-                cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+               write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
                 LASSERT (kptllnd_data.kptl_shutdown == 0);
                 kptllnd_data.kptl_shutdown = 1; /* phase 1 == destroy peers */
                 /* no new peers possible now */
-                cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+               write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
                                             flags);
 
                 /* nuke all existing peers */
@@ -535,7 +535,7 @@ kptllnd_base_shutdown (void)
                 process_id.pid = LNET_PID_ANY;
                 kptllnd_peer_del(process_id);
 
-                cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+               read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
 
                 LASSERT (kptllnd_data.kptl_n_active_peers == 0);
 
@@ -546,12 +546,12 @@ kptllnd_base_shutdown (void)
                                "Waiting for %d peers to terminate\n",
                                kptllnd_data.kptl_npeers);
 
-                        cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+                       read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
                                                    flags);
 
                         cfs_pause(cfs_time_seconds(1));
 
-                        cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock,
+                       read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock,
                                               flags);
                 }
 
@@ -561,7 +561,7 @@ kptllnd_base_shutdown (void)
                 for (i = 0; i < kptllnd_data.kptl_peer_hash_size; i++)
                         LASSERT (cfs_list_empty (&kptllnd_data.kptl_peers[i]));
 
-                cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+               read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
                                            flags);
                 CDEBUG(D_NET, "All peers deleted\n");
 
@@ -672,21 +672,21 @@ kptllnd_base_startup (void)
         kptllnd_data.kptl_eqh = PTL_INVALID_HANDLE;
         kptllnd_data.kptl_nih = PTL_INVALID_HANDLE;
 
-        cfs_rwlock_init(&kptllnd_data.kptl_net_rw_lock);
+       rwlock_init(&kptllnd_data.kptl_net_rw_lock);
         CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_nets);
 
         /* Setup the sched locks/lists/waitq */
-        cfs_spin_lock_init(&kptllnd_data.kptl_sched_lock);
+       spin_lock_init(&kptllnd_data.kptl_sched_lock);
         cfs_waitq_init(&kptllnd_data.kptl_sched_waitq);
         CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_txq);
         CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxq);
         CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_sched_rxbq);
 
         /* Init kptl_ptlid2str_lock before any call to kptllnd_ptlid2str */
-        cfs_spin_lock_init(&kptllnd_data.kptl_ptlid2str_lock);
+       spin_lock_init(&kptllnd_data.kptl_ptlid2str_lock);
 
         /* Setup the tx locks/lists */
-        cfs_spin_lock_init(&kptllnd_data.kptl_tx_lock);
+       spin_lock_init(&kptllnd_data.kptl_tx_lock);
         CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_idle_txs);
         cfs_atomic_set(&kptllnd_data.kptl_ntx, 0);
 
@@ -771,7 +771,7 @@ kptllnd_base_startup (void)
         kptllnd_data.kptl_nak_msg->ptlm_srcpid   = the_lnet.ln_pid;
         kptllnd_data.kptl_nak_msg->ptlm_srcstamp = kptllnd_data.kptl_incarnation;
 
-        cfs_rwlock_init(&kptllnd_data.kptl_peer_rw_lock);
+       rwlock_init(&kptllnd_data.kptl_peer_rw_lock);
         cfs_waitq_init(&kptllnd_data.kptl_watchdog_waitq);
         CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_closing_peers);
         CFS_INIT_LIST_HEAD(&kptllnd_data.kptl_zombie_peers);
@@ -905,9 +905,9 @@ kptllnd_startup (lnet_ni_t *ni)
         kptllnd_data.kptl_nak_msg->ptlm_srcnid = ni->ni_nid;
 
         cfs_atomic_set(&net->net_refcount, 1);
-        cfs_write_lock(&kptllnd_data.kptl_net_rw_lock);
+       write_lock(&kptllnd_data.kptl_net_rw_lock);
         cfs_list_add_tail(&net->net_list, &kptllnd_data.kptl_nets);
-        cfs_write_unlock(&kptllnd_data.kptl_net_rw_lock);
+       write_unlock(&kptllnd_data.kptl_net_rw_lock);
         return 0;
 
  failed:
@@ -937,15 +937,15 @@ kptllnd_shutdown (lnet_ni_t *ni)
         ni->ni_data = NULL;
         net->net_ni = NULL;
 
-        cfs_write_lock(&kptllnd_data.kptl_net_rw_lock);
+       write_lock(&kptllnd_data.kptl_net_rw_lock);
         kptllnd_net_decref(net);
         cfs_list_del_init(&net->net_list);
-        cfs_write_unlock(&kptllnd_data.kptl_net_rw_lock);
+       write_unlock(&kptllnd_data.kptl_net_rw_lock);
 
         /* Can't nuke peers here - they are shared among all NIs */
-        cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+       write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
         net->net_shutdown = 1;   /* Order with peer creation */
-        cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+       write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
 
         i = 2;
         while (cfs_atomic_read(&net->net_refcount) != 0) {
index 7862952..439a770 100644 (file)
@@ -147,7 +147,7 @@ typedef struct kptl_rx                          /* receive message */
 
 typedef struct kptl_rx_buffer_pool
 {
-        cfs_spinlock_t          rxbp_lock;
+       spinlock_t              rxbp_lock;
         cfs_list_t              rxbp_list;      /* all allocated buffers */
         int                     rxbp_count;     /* # allocated buffers */
         int                     rxbp_reserved;  /* # requests to buffer */
@@ -225,7 +225,7 @@ struct kptl_peer
         cfs_list_t              peer_list;
         cfs_atomic_t            peer_refcount;          /* The current references */
         enum kptllnd_peer_state peer_state;
-        cfs_spinlock_t          peer_lock;              /* serialize */
+       spinlock_t              peer_lock;              /* serialize */
         cfs_list_t              peer_noops;             /* PTLLND_MSG_TYPE_NOOP txs */
         cfs_list_t              peer_sendq;             /* txs waiting for mh handles */
         cfs_list_t              peer_activeq;           /* txs awaiting completion */
@@ -256,10 +256,10 @@ struct kptl_data
         __u64                   kptl_incarnation;      /* which one am I */
         ptl_handle_eq_t         kptl_eqh;              /* Event Queue (EQ) */
 
-        cfs_rwlock_t            kptl_net_rw_lock;      /* serialise... */
-        cfs_list_t              kptl_nets;             /* kptl_net instances */
+       rwlock_t                kptl_net_rw_lock;       /* serialise... */
+       cfs_list_t              kptl_nets;              /* kptl_net instances */
 
-        cfs_spinlock_t          kptl_sched_lock;       /* serialise... */
+       spinlock_t              kptl_sched_lock;        /* serialise... */
         cfs_waitq_t             kptl_sched_waitq;      /* schedulers sleep here */
         cfs_list_t              kptl_sched_txq;        /* tx requiring attention */
         cfs_list_t              kptl_sched_rxq;        /* rx requiring attention */
@@ -271,10 +271,10 @@ struct kptl_data
         cfs_mem_cache_t*        kptl_rx_cache;         /* rx descriptor cache */
 
         cfs_atomic_t            kptl_ntx;              /* # tx descs allocated */
-        cfs_spinlock_t          kptl_tx_lock;          /* serialise idle tx list*/
-        cfs_list_t              kptl_idle_txs;         /* idle tx descriptors */
+       spinlock_t              kptl_tx_lock;        /* serialise idle tx list*/
+       cfs_list_t              kptl_idle_txs;       /* idle tx descriptors */
 
-        cfs_rwlock_t            kptl_peer_rw_lock;     /* lock for peer table */
+       rwlock_t                kptl_peer_rw_lock;   /* lock for peer table */
         cfs_list_t             *kptl_peers;            /* hash table of all my known peers */
         cfs_list_t              kptl_closing_peers;    /* peers being closed */
         cfs_list_t              kptl_zombie_peers;     /* peers waiting for refs to drain */
@@ -284,7 +284,7 @@ struct kptl_data
         int                     kptl_expected_peers;   /* # peers I can buffer HELLOs from */
 
         kptl_msg_t             *kptl_nak_msg;          /* common NAK message */
-        cfs_spinlock_t          kptl_ptlid2str_lock;   /* serialise str ops */
+       spinlock_t              kptl_ptlid2str_lock;    /* serialise str ops */
 };
 
 struct kptl_net
@@ -382,25 +382,25 @@ kptllnd_rx_buffer_size(void)
 static inline void
 kptllnd_rx_buffer_addref(kptl_rx_buffer_t *rxb)
 {
-        unsigned long flags;
+       unsigned long flags;
 
-        cfs_spin_lock_irqsave(&rxb->rxb_pool->rxbp_lock, flags);
-        rxb->rxb_refcount++;
-        cfs_spin_unlock_irqrestore(&rxb->rxb_pool->rxbp_lock, flags);
+       spin_lock_irqsave(&rxb->rxb_pool->rxbp_lock, flags);
+       rxb->rxb_refcount++;
+       spin_unlock_irqrestore(&rxb->rxb_pool->rxbp_lock, flags);
 }
 
 static inline void
 kptllnd_rx_buffer_decref_locked(kptl_rx_buffer_t *rxb)
 {
-        if (--(rxb->rxb_refcount) == 0) {
-                cfs_spin_lock(&kptllnd_data.kptl_sched_lock);
+       if (--(rxb->rxb_refcount) == 0) {
+               spin_lock(&kptllnd_data.kptl_sched_lock);
 
-                cfs_list_add_tail(&rxb->rxb_repost_list,
-                                  &kptllnd_data.kptl_sched_rxbq);
-                cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
+               cfs_list_add_tail(&rxb->rxb_repost_list,
+                                 &kptllnd_data.kptl_sched_rxbq);
+               cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
 
-                cfs_spin_unlock(&kptllnd_data.kptl_sched_lock);
-        }
+               spin_unlock(&kptllnd_data.kptl_sched_lock);
+       }
 }
 
 static inline void
@@ -409,9 +409,9 @@ kptllnd_rx_buffer_decref(kptl_rx_buffer_t *rxb)
         unsigned long flags;
         int           count;
 
-        cfs_spin_lock_irqsave(&rxb->rxb_pool->rxbp_lock, flags);
-        count = --(rxb->rxb_refcount);
-        cfs_spin_unlock_irqrestore(&rxb->rxb_pool->rxbp_lock, flags);
+       spin_lock_irqsave(&rxb->rxb_pool->rxbp_lock, flags);
+       count = --(rxb->rxb_refcount);
+       spin_unlock_irqrestore(&rxb->rxb_pool->rxbp_lock, flags);
 
         if (count == 0)
                 kptllnd_rx_buffer_post(rxb);
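
kptllnd_rx_buffer_decref() above keeps its refcount under the pool spinlock rather than in an atomic_t: the new count is sampled inside the lock, and the zero-action (reposting the buffer, which may block) runs outside it.  A sketch with hypothetical demo_buf names:

	#include <linux/spinlock.h>

	struct demo_buf {
		spinlock_t db_lock;
		int        db_refs;
	};
	extern void demo_buf_repost(struct demo_buf *b); /* hypothetical; may block */

	static void demo_buf_decref(struct demo_buf *b)
	{
		unsigned long flags;
		int count;

		spin_lock_irqsave(&b->db_lock, flags);
		count = --b->db_refs;
		spin_unlock_irqrestore(&b->db_lock, flags);

		if (count == 0)
			demo_buf_repost(b);   /* safely outside the lock */
	}
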
@@ -503,9 +503,9 @@ kptllnd_id2peer(lnet_process_id_t id)
         kptl_peer_t   *peer;
         unsigned long  flags;
 
-        cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
-        peer = kptllnd_id2peer_locked(id);
-        cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+       read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+       peer = kptllnd_id2peer_locked(id);
+       read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
 
         return peer;
 }
index d78001d..5319dfb 100644 (file)
@@ -255,7 +255,7 @@ kptllnd_active_rdma(kptl_rx_t *rx, lnet_msg_t *lntmsg, int type,
                 return -EIO;
         }
 
-        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+       spin_lock_irqsave(&peer->peer_lock, flags);
 
         tx->tx_lnet_msg = lntmsg;
         /* lnet_finalize() will be called when tx is torn down, so I must
@@ -268,7 +268,7 @@ kptllnd_active_rdma(kptl_rx_t *rx, lnet_msg_t *lntmsg, int type,
 
         /* peer has now got my ref on 'tx' */
 
-        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+       spin_unlock_irqrestore(&peer->peer_lock, flags);
 
         tx->tx_tposted = jiffies;
 
@@ -753,7 +753,7 @@ kptllnd_scheduler (void *arg)
 
         cfs_waitlink_init(&waitlink);
 
-        cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+       spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
 
         /* threads shut down in phase 2 after all peers have been destroyed */
         while (kptllnd_data.kptl_shutdown < 2) {
@@ -765,14 +765,14 @@ kptllnd_scheduler (void *arg)
                                              kptl_rx_t, rx_list);
                         cfs_list_del(&rx->rx_list);
 
-                        cfs_spin_unlock_irqrestore(&kptllnd_data. \
+                       spin_unlock_irqrestore(&kptllnd_data. \
                                                    kptl_sched_lock,
                                                    flags);
 
                         kptllnd_rx_parse(rx);
                         did_something = 1;
 
-                        cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
+                       spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
                                               flags);
                 }
 
@@ -782,14 +782,14 @@ kptllnd_scheduler (void *arg)
                                               rxb_repost_list);
                         cfs_list_del(&rxb->rxb_repost_list);
 
-                        cfs_spin_unlock_irqrestore(&kptllnd_data. \
+                       spin_unlock_irqrestore(&kptllnd_data. \
                                                    kptl_sched_lock,
                                                    flags);
 
                         kptllnd_rx_buffer_post(rxb);
                         did_something = 1;
 
-                        cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
+                       spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
                                               flags);
                 }
 
@@ -798,13 +798,13 @@ kptllnd_scheduler (void *arg)
                                              kptl_tx_t, tx_list);
                         cfs_list_del_init(&tx->tx_list);
 
-                        cfs_spin_unlock_irqrestore(&kptllnd_data. \
+                       spin_unlock_irqrestore(&kptllnd_data. \
                                                    kptl_sched_lock, flags);
 
                         kptllnd_tx_fini(tx);
                         did_something = 1;
 
-                        cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
+                       spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
                                               flags);
                 }
 
@@ -816,7 +816,7 @@ kptllnd_scheduler (void *arg)
                 cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
                 cfs_waitq_add_exclusive(&kptllnd_data.kptl_sched_waitq,
                                         &waitlink);
-                cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
+               spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
                                            flags);
 
                 if (!did_something)
@@ -827,12 +827,12 @@ kptllnd_scheduler (void *arg)
                 cfs_set_current_state(CFS_TASK_RUNNING);
                 cfs_waitq_del(&kptllnd_data.kptl_sched_waitq, &waitlink);
 
-                cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+               spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
 
                 counter = 0;
         }
 
-        cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
+       spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock, flags);
 
         kptllnd_thread_fini();
         return 0;
index f792de7..100f1c9 100644 (file)
@@ -62,14 +62,14 @@ kptllnd_get_peer_info(int index,
                       int *nsendq, int *nactiveq,
                       int *credits, int *outstanding_credits)
 {
-        cfs_rwlock_t     *g_lock = &kptllnd_data.kptl_peer_rw_lock;
+       rwlock_t     *g_lock = &kptllnd_data.kptl_peer_rw_lock;
         unsigned long     flags;
         cfs_list_t       *ptmp;
         kptl_peer_t      *peer;
         int               i;
         int               rc = -ENOENT;
 
-        cfs_read_lock_irqsave(g_lock, flags);
+       read_lock_irqsave(g_lock, flags);
 
         for (i = 0; i < kptllnd_data.kptl_peer_hash_size; i++) {
                 cfs_list_for_each (ptmp, &kptllnd_data.kptl_peers[i]) {
@@ -84,7 +84,7 @@ kptllnd_get_peer_info(int index,
                         *refcount    = cfs_atomic_read(&peer->peer_refcount);
                         *incarnation = peer->peer_incarnation;
 
-                        cfs_spin_lock(&peer->peer_lock);
+                       spin_lock(&peer->peer_lock);
 
                         *next_matchbits      = peer->peer_next_matchbits;
                         *last_matchbits_seen = peer->peer_last_matchbits_seen;
@@ -94,7 +94,7 @@ kptllnd_get_peer_info(int index,
                         *nsendq   = kptllnd_count_queue(&peer->peer_sendq);
                         *nactiveq = kptllnd_count_queue(&peer->peer_activeq);
 
-                        cfs_spin_unlock(&peer->peer_lock);
+                       spin_unlock(&peer->peer_lock);
 
                         rc = 0;
                         goto out;
@@ -102,7 +102,7 @@ kptllnd_get_peer_info(int index,
         }
 
  out:
-        cfs_read_unlock_irqrestore(g_lock, flags);
+       read_unlock_irqrestore(g_lock, flags);
         return rc;
 }
 
@@ -179,7 +179,7 @@ kptllnd_peer_allocate (kptl_net_t *net, lnet_process_id_t lpid, ptl_process_id_t
         CFS_INIT_LIST_HEAD (&peer->peer_noops);
         CFS_INIT_LIST_HEAD (&peer->peer_sendq);
         CFS_INIT_LIST_HEAD (&peer->peer_activeq);
-        cfs_spin_lock_init (&peer->peer_lock);
+       spin_lock_init(&peer->peer_lock);
 
         peer->peer_state = PEER_STATE_ALLOCATED;
         peer->peer_error = 0;
@@ -194,21 +194,21 @@ kptllnd_peer_allocate (kptl_net_t *net, lnet_process_id_t lpid, ptl_process_id_t
 
         cfs_atomic_set(&peer->peer_refcount, 1);    /* 1 ref for caller */
 
-        cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+       write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
 
         peer->peer_myincarnation = kptllnd_data.kptl_incarnation;
 
         /* Only increase # peers under lock, to guarantee we don't grow it
          * during shutdown */
         if (net->net_shutdown) {
-                cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+               write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
                                             flags);
                 LIBCFS_FREE(peer, sizeof(*peer));
                 return NULL;
         }
 
         kptllnd_data.kptl_npeers++;
-        cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+       write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
         return peer;
 }
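
kptllnd_peer_allocate() above uses the peer rwlock as a shutdown barrier: kptl_npeers grows only under the write lock, and net_shutdown is set under the same lock, so no new peer can appear once shutdown has begun.  The pattern in isolation, with hypothetical demo_ names:

	#include <linux/spinlock.h>

	static DEFINE_RWLOCK(demo_glock);
	static int demo_shutdown;         /* set once, under write lock */
	static int demo_count;            /* grows only under write lock */

	struct demo_obj;
	extern struct demo_obj *demo_alloc(void);       /* hypothetical */
	extern void demo_free(struct demo_obj *o);      /* hypothetical */

	static struct demo_obj *demo_create(void)
	{
		struct demo_obj *o = demo_alloc();
		unsigned long flags;

		write_lock_irqsave(&demo_glock, flags);
		if (demo_shutdown) {
			/* too late: back out without touching the count */
			write_unlock_irqrestore(&demo_glock, flags);
			demo_free(o);
			return NULL;
		}
		demo_count++;
		write_unlock_irqrestore(&demo_glock, flags);
		return o;
	}
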
 
@@ -227,14 +227,14 @@ kptllnd_peer_destroy (kptl_peer_t *peer)
         LASSERT (cfs_list_empty(&peer->peer_sendq));
         LASSERT (cfs_list_empty(&peer->peer_activeq));
 
-        cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+       write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
 
         if (peer->peer_state == PEER_STATE_ZOMBIE)
                 cfs_list_del(&peer->peer_list);
 
         kptllnd_data.kptl_npeers--;
 
-        cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+       write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
 
         LIBCFS_FREE (peer, sizeof (*peer));
 }
@@ -262,13 +262,13 @@ kptllnd_peer_cancel_txs(kptl_peer_t *peer, cfs_list_t *txs)
 {
         unsigned long   flags;
 
-        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+       spin_lock_irqsave(&peer->peer_lock, flags);
 
         kptllnd_cancel_txlist(&peer->peer_noops, txs);
         kptllnd_cancel_txlist(&peer->peer_sendq, txs);
         kptllnd_cancel_txlist(&peer->peer_activeq, txs);
                 
-        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+       spin_unlock_irqrestore(&peer->peer_lock, flags);
 }
 
 void
@@ -290,7 +290,7 @@ kptllnd_peer_notify (kptl_peer_t *peer)
         int           error = 0;
         cfs_time_t    last_alive = 0;
 
-        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+       spin_lock_irqsave(&peer->peer_lock, flags);
 
         if (peer->peer_error != 0) {
                 error = peer->peer_error;
@@ -298,15 +298,15 @@ kptllnd_peer_notify (kptl_peer_t *peer)
                 last_alive = peer->peer_last_alive;
         }
 
-        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+       spin_unlock_irqrestore(&peer->peer_lock, flags);
 
         if (error == 0)
                 return;
 
-        cfs_read_lock(&kptllnd_data.kptl_net_rw_lock);
+       read_lock(&kptllnd_data.kptl_net_rw_lock);
         cfs_list_for_each_entry (net, &kptllnd_data.kptl_nets, net_list)
                 nnets++;
-        cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
+       read_unlock(&kptllnd_data.kptl_net_rw_lock);
 
         if (nnets == 0) /* shutdown in progress */
                 return;
@@ -318,7 +318,7 @@ kptllnd_peer_notify (kptl_peer_t *peer)
         }
         memset(nets, 0, nnets * sizeof(*nets));
 
-        cfs_read_lock(&kptllnd_data.kptl_net_rw_lock);
+       read_lock(&kptllnd_data.kptl_net_rw_lock);
         i = 0;
         cfs_list_for_each_entry (net, &kptllnd_data.kptl_nets, net_list) {
                 LASSERT (i < nnets);
@@ -326,7 +326,7 @@ kptllnd_peer_notify (kptl_peer_t *peer)
                 kptllnd_net_addref(net);
                 i++;
         }
-        cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
+       read_unlock(&kptllnd_data.kptl_net_rw_lock);
 
         for (i = 0; i < nnets; i++) {
                 lnet_nid_t peer_nid;
@@ -360,17 +360,17 @@ kptllnd_handle_closing_peers ()
 
         /* Check with a read lock first to avoid blocking anyone */
 
-        cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+       read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
         idle = cfs_list_empty(&kptllnd_data.kptl_closing_peers) &&
                cfs_list_empty(&kptllnd_data.kptl_zombie_peers);
-        cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+       read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
 
         if (idle)
                 return;
 
         CFS_INIT_LIST_HEAD(&txs);
 
-        cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+       write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
 
         /* Cancel txs on all zombie peers.  NB anyone dropping the last peer
          * ref removes it from this list, so I mustn't drop the lock while
@@ -397,17 +397,17 @@ kptllnd_handle_closing_peers ()
                                   &kptllnd_data.kptl_zombie_peers);
                 peer->peer_state = PEER_STATE_ZOMBIE;
 
-                cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+               write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
                                             flags);
 
                 kptllnd_peer_notify(peer);
                 kptllnd_peer_cancel_txs(peer, &txs);
                 kptllnd_peer_decref(peer);
 
-                cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+               write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
         }
 
-        cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+       write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
 
         /* Drop peer's ref on all cancelled txs.  This will get
          * kptllnd_tx_fini() to abort outstanding comms if necessary. */
@@ -460,9 +460,9 @@ kptllnd_peer_close(kptl_peer_t *peer, int why)
 {
         unsigned long      flags;
 
-        cfs_write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+       write_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
         kptllnd_peer_close_locked(peer, why);
-        cfs_write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+       write_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
 }
 
 int
@@ -494,7 +494,7 @@ kptllnd_peer_del(lnet_process_id_t id)
         }
 
 again:
-        cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+       read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
 
         for (i = lo; i <= hi; i++) {
                 cfs_list_for_each_safe (ptmp, pnxt,
@@ -509,7 +509,7 @@ again:
 
                         kptllnd_peer_addref(peer); /* 1 ref for me... */
 
-                        cfs_read_unlock_irqrestore(&kptllnd_data. \
+                       read_unlock_irqrestore(&kptllnd_data. \
                                                    kptl_peer_rw_lock,
                                                    flags);
 
@@ -523,7 +523,7 @@ again:
                 }
         }
 
-        cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+       read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
 
         return (rc);
 }
@@ -534,7 +534,7 @@ kptllnd_queue_tx(kptl_peer_t *peer, kptl_tx_t *tx)
         /* CAVEAT EMPTOR: I take over caller's ref on 'tx' */
         unsigned long flags;
 
-        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+       spin_lock_irqsave(&peer->peer_lock, flags);
 
         /* Ensure HELLO is sent first */
         if (tx->tx_msg->ptlm_type == PTLLND_MSG_TYPE_NOOP)
@@ -544,7 +544,7 @@ kptllnd_queue_tx(kptl_peer_t *peer, kptl_tx_t *tx)
         else
                 cfs_list_add_tail(&tx->tx_list, &peer->peer_sendq);
 
-        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+       spin_unlock_irqrestore(&peer->peer_lock, flags);
 }
 
 
@@ -672,13 +672,13 @@ kptllnd_peer_check_sends (kptl_peer_t *peer)
 
         LASSERT(!cfs_in_interrupt());
 
-        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+       spin_lock_irqsave(&peer->peer_lock, flags);
 
         peer->peer_retry_noop = 0;
 
         if (kptllnd_peer_send_noop(peer)) {
                 /* post a NOOP to return credits */
-                cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+               spin_unlock_irqrestore(&peer->peer_lock, flags);
 
                 tx = kptllnd_get_idle_tx(TX_TYPE_SMALL_MESSAGE);
                 if (tx == NULL) {
@@ -690,7 +690,7 @@ kptllnd_peer_check_sends (kptl_peer_t *peer)
                         kptllnd_post_tx(peer, tx, 0);
                 }
 
-                cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+               spin_lock_irqsave(&peer->peer_lock, flags);
                 peer->peer_retry_noop = (tx == NULL);
         }
 
@@ -760,13 +760,13 @@ kptllnd_peer_check_sends (kptl_peer_t *peer)
                     !kptllnd_peer_send_noop(peer)) {
                         tx->tx_active = 0;
 
-                        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+                       spin_unlock_irqrestore(&peer->peer_lock, flags);
 
                         CDEBUG(D_NET, "%s: redundant noop\n", 
                                libcfs_id2str(peer->peer_id));
                         kptllnd_tx_decref(tx);
 
-                        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+                       spin_lock_irqsave(&peer->peer_lock, flags);
                         continue;
                 }
 
@@ -797,7 +797,7 @@ kptllnd_peer_check_sends (kptl_peer_t *peer)
 
                 kptllnd_tx_addref(tx);          /* 1 ref for me... */
 
-                cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+               spin_unlock_irqrestore(&peer->peer_lock, flags);
 
                 if (tx->tx_type == TX_TYPE_PUT_REQUEST ||
                     tx->tx_type == TX_TYPE_GET_REQUEST) {
@@ -853,10 +853,10 @@ kptllnd_peer_check_sends (kptl_peer_t *peer)
 
                 kptllnd_tx_decref(tx);          /* drop my ref */
 
-                cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+               spin_lock_irqsave(&peer->peer_lock, flags);
         }
 
-        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+       spin_unlock_irqrestore(&peer->peer_lock, flags);
         return;
 
  failed:
@@ -904,7 +904,7 @@ kptllnd_peer_check_bucket (int idx, int stamp)
 
  again:
         /* NB. Shared lock while I just look */
-        cfs_read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
+       read_lock_irqsave(&kptllnd_data.kptl_peer_rw_lock, flags);
 
         cfs_list_for_each_entry (peer, peers, peer_list) {
                 kptl_tx_t *tx;
@@ -917,11 +917,11 @@ kptllnd_peer_check_bucket (int idx, int stamp)
                        libcfs_id2str(peer->peer_id), peer->peer_credits, 
                        peer->peer_outstanding_credits, peer->peer_sent_credits);
 
-                cfs_spin_lock(&peer->peer_lock);
+               spin_lock(&peer->peer_lock);
 
                 if (peer->peer_check_stamp == stamp) {
                         /* checked already this pass */
-                        cfs_spin_unlock(&peer->peer_lock);
+                       spin_unlock(&peer->peer_lock);
                         continue;
                 }
 
@@ -939,14 +939,14 @@ kptllnd_peer_check_bucket (int idx, int stamp)
                         nactive = kptllnd_count_queue(&peer->peer_activeq);
                 }
 
-                cfs_spin_unlock(&peer->peer_lock);
+               spin_unlock(&peer->peer_lock);
 
                 if (tx == NULL && !check_sends)
                         continue;
 
                 kptllnd_peer_addref(peer); /* 1 ref for me... */
 
-                cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
-                                            flags);
+               read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock,
+                                      flags);
 
                 if (tx == NULL) { /* nothing timed out */
@@ -1007,7 +1007,7 @@ kptllnd_peer_check_bucket (int idx, int stamp)
                 goto again;
         }
 
-        cfs_read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
+       read_unlock_irqrestore(&kptllnd_data.kptl_peer_rw_lock, flags);
 }
 
 kptl_peer_t *
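
kptllnd_peer_check_bucket() above also shows the two-level pattern these renames leave untouched: a shared read_lock_irqsave() over the hash bucket, a per-peer spin_lock() for peer state, and a reference taken before the table lock is dropped for blocking work. Sketched below under the same assumptions, with hypothetical names (wpeer, scan_bucket); the restart via goto is needed because the bucket can change once the read lock is released:

#include <linux/list.h>
#include <linux/spinlock.h>

struct wpeer {
        struct list_head w_list;
        spinlock_t       w_lock;
        int              w_stamp;
};

extern rwlock_t table_lock;
extern void wpeer_addref(struct wpeer *p);
extern void wpeer_decref(struct wpeer *p);
extern void wpeer_timeout_work(struct wpeer *p);        /* may block */

static void scan_bucket(struct list_head *bucket, int stamp)
{
        struct wpeer *p;
        unsigned long flags;

again:
        read_lock_irqsave(&table_lock, flags);          /* shared: just looking */
        list_for_each_entry(p, bucket, w_list) {
                spin_lock(&p->w_lock);                  /* per-peer state */
                if (p->w_stamp == stamp) {              /* checked this pass */
                        spin_unlock(&p->w_lock);
                        continue;
                }
                p->w_stamp = stamp;
                spin_unlock(&p->w_lock);

                wpeer_addref(p);                        /* pin before unlocking */
                read_unlock_irqrestore(&table_lock, flags);

                wpeer_timeout_work(p);
                wpeer_decref(p);
                goto again;                             /* bucket may have changed */
        }
        read_unlock_irqrestore(&table_lock, flags);
}
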
@@ -1093,7 +1093,7 @@ kptl_peer_t *
 kptllnd_peer_handle_hello (kptl_net_t *net,
                            ptl_process_id_t initiator, kptl_msg_t *msg)
 {
-        cfs_rwlock_t       *g_lock = &kptllnd_data.kptl_peer_rw_lock;
+       rwlock_t           *g_lock = &kptllnd_data.kptl_peer_rw_lock;
         kptl_peer_t        *peer;
         kptl_peer_t        *new_peer;
         lnet_process_id_t   lpid;
@@ -1144,7 +1144,7 @@ kptllnd_peer_handle_hello (kptl_net_t *net,
                 return NULL;
         }
         
-        cfs_write_lock_irqsave(g_lock, flags);
+       write_lock_irqsave(g_lock, flags);
 
         peer = kptllnd_id2peer_locked(lpid);
         if (peer != NULL) {
@@ -1154,7 +1154,7 @@ kptllnd_peer_handle_hello (kptl_net_t *net,
 
                         if (msg->ptlm_dststamp != 0 &&
                             msg->ptlm_dststamp != peer->peer_myincarnation) {
-                                cfs_write_unlock_irqrestore(g_lock, flags);
+                               write_unlock_irqrestore(g_lock, flags);
 
                                 CERROR("Ignoring HELLO from %s: unexpected "
                                        "dststamp "LPX64" ("LPX64" wanted)\n",
@@ -1172,13 +1172,13 @@ kptllnd_peer_handle_hello (kptl_net_t *net,
                         peer->peer_max_msg_size =
                                 msg->ptlm_u.hello.kptlhm_max_msg_size;
                         
-                        cfs_write_unlock_irqrestore(g_lock, flags);
+                       write_unlock_irqrestore(g_lock, flags);
                         return peer;
                 }
 
                 if (msg->ptlm_dststamp != 0 &&
                     msg->ptlm_dststamp <= peer->peer_myincarnation) {
-                        cfs_write_unlock_irqrestore(g_lock, flags);
+                       write_unlock_irqrestore(g_lock, flags);
 
                         CERROR("Ignoring stale HELLO from %s: "
                                "dststamp "LPX64" (current "LPX64")\n",
@@ -1195,7 +1195,7 @@ kptllnd_peer_handle_hello (kptl_net_t *net,
 
         kptllnd_cull_peertable_locked(lpid);
 
-        cfs_write_unlock_irqrestore(g_lock, flags);
+       write_unlock_irqrestore(g_lock, flags);
 
         if (peer != NULL) {
                 CDEBUG(D_NET, "Peer %s (%s) reconnecting:"
@@ -1233,11 +1233,11 @@ kptllnd_peer_handle_hello (kptl_net_t *net,
                 return NULL;
         }
 
-        cfs_write_lock_irqsave(g_lock, flags);
+       write_lock_irqsave(g_lock, flags);
 
  again:
         if (net->net_shutdown) {
-                cfs_write_unlock_irqrestore(g_lock, flags);
+               write_unlock_irqrestore(g_lock, flags);
 
                 CERROR ("Shutdown started, refusing connection from %s\n",
                         libcfs_id2str(lpid));
@@ -1259,14 +1259,14 @@ kptllnd_peer_handle_hello (kptl_net_t *net,
                         peer->peer_max_msg_size =
                                 msg->ptlm_u.hello.kptlhm_max_msg_size;
 
-                        cfs_write_unlock_irqrestore(g_lock, flags);
+                       write_unlock_irqrestore(g_lock, flags);
 
                         CWARN("Outgoing instantiated peer %s\n",
                               libcfs_id2str(lpid));
                } else {
                        LASSERT (peer->peer_state == PEER_STATE_ACTIVE);
 
-                        cfs_write_unlock_irqrestore(g_lock, flags);
+                       write_unlock_irqrestore(g_lock, flags);
 
                        /* WOW!  Somehow this peer completed the HELLO
                         * handshake while I slept.  I guess I could have slept
@@ -1286,7 +1286,7 @@ kptllnd_peer_handle_hello (kptl_net_t *net,
         if (kptllnd_data.kptl_n_active_peers ==
             kptllnd_data.kptl_expected_peers) {
                 /* peer table full */
-                cfs_write_unlock_irqrestore(g_lock, flags);
+               write_unlock_irqrestore(g_lock, flags);
 
                 kptllnd_peertable_overflow_msg("Connection from ", lpid);
 
@@ -1300,7 +1300,7 @@ kptllnd_peer_handle_hello (kptl_net_t *net,
                         return NULL;
                 }
                 
-                cfs_write_lock_irqsave(g_lock, flags);
+               write_lock_irqsave(g_lock, flags);
                 kptllnd_data.kptl_expected_peers++;
                 goto again;
         }
@@ -1320,7 +1320,7 @@ kptllnd_peer_handle_hello (kptl_net_t *net,
         LASSERT (!net->net_shutdown);
         kptllnd_peer_add_peertable_locked(new_peer);
 
-        cfs_write_unlock_irqrestore(g_lock, flags);
+       write_unlock_irqrestore(g_lock, flags);
 
        /* NB someone else could get in now and post a message before I post
         * the HELLO, but post_tx/check_sends take care of that! */
@@ -1345,7 +1345,7 @@ int
 kptllnd_find_target(kptl_net_t *net, lnet_process_id_t target,
                     kptl_peer_t **peerp)
 {
-        cfs_rwlock_t     *g_lock = &kptllnd_data.kptl_peer_rw_lock;
+       rwlock_t     *g_lock = &kptllnd_data.kptl_peer_rw_lock;
         ptl_process_id_t  ptl_id;
         kptl_peer_t      *new_peer;
         kptl_tx_t        *hello_tx;
@@ -1354,9 +1354,9 @@ kptllnd_find_target(kptl_net_t *net, lnet_process_id_t target,
         __u64             last_matchbits_seen;
 
         /* I expect to find the peer, so I only take a read lock... */
-        cfs_read_lock_irqsave(g_lock, flags);
+       read_lock_irqsave(g_lock, flags);
         *peerp = kptllnd_id2peer_locked(target);
-        cfs_read_unlock_irqrestore(g_lock, flags);
+       read_unlock_irqrestore(g_lock, flags);
 
         if (*peerp != NULL)
                 return 0;
@@ -1393,14 +1393,14 @@ kptllnd_find_target(kptl_net_t *net, lnet_process_id_t target,
         if (rc != 0)
                 goto unwind_1;
 
-        cfs_write_lock_irqsave(g_lock, flags);
+       write_lock_irqsave(g_lock, flags);
  again:
         /* Called only in lnd_send which can't happen after lnd_shutdown */
         LASSERT (!net->net_shutdown);
 
         *peerp = kptllnd_id2peer_locked(target);
         if (*peerp != NULL) {
-                cfs_write_unlock_irqrestore(g_lock, flags);
+               write_unlock_irqrestore(g_lock, flags);
                 goto unwind_2;
         }
 
@@ -1409,7 +1409,7 @@ kptllnd_find_target(kptl_net_t *net, lnet_process_id_t target,
         if (kptllnd_data.kptl_n_active_peers ==
             kptllnd_data.kptl_expected_peers) {
                 /* peer table full */
-                cfs_write_unlock_irqrestore(g_lock, flags);
+               write_unlock_irqrestore(g_lock, flags);
 
                 kptllnd_peertable_overflow_msg("Connection to ", target);
 
@@ -1420,7 +1420,7 @@ kptllnd_find_target(kptl_net_t *net, lnet_process_id_t target,
                         rc = -ENOMEM;
                         goto unwind_2;
                 }
-                cfs_write_lock_irqsave(g_lock, flags);
+               write_lock_irqsave(g_lock, flags);
                 kptllnd_data.kptl_expected_peers++;
                 goto again;
         }
@@ -1436,7 +1436,7 @@ kptllnd_find_target(kptl_net_t *net, lnet_process_id_t target,
 
         kptllnd_peer_add_peertable_locked(new_peer);
 
-        cfs_write_unlock_irqrestore(g_lock, flags);
+       write_unlock_irqrestore(g_lock, flags);
 
         /* NB someone else could get in now and post a message before I post
          * the HELLO, but post_tx/check_sends take care of that! */
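
kptllnd_find_target() above is the optimistic variant: a read lock for the expected hit, allocation with no lock held, then a write lock and a second lookup to catch a racing creator. A compact sketch of that double-checked shape, all names (find_or_create, table_lookup_locked) hypothetical:

#include <linux/list.h>
#include <linux/spinlock.h>

struct wpeer;

extern rwlock_t table_lock;
extern struct wpeer *table_lookup_locked(int id);  /* table_lock must be held */
extern void table_insert_locked(struct wpeer *p);
extern struct wpeer *wpeer_alloc(int id);          /* may sleep */
extern void wpeer_free(struct wpeer *p);

static struct wpeer *find_or_create(int id)
{
        struct wpeer *p, *newp;
        unsigned long flags;

        /* fast path: expect a hit, so take only the shared lock */
        read_lock_irqsave(&table_lock, flags);
        p = table_lookup_locked(id);
        read_unlock_irqrestore(&table_lock, flags);
        if (p != NULL)
                return p;

        newp = wpeer_alloc(id);                 /* allocate outside any lock */
        if (newp == NULL)
                return NULL;

        write_lock_irqsave(&table_lock, flags);
        p = table_lookup_locked(id);            /* re-check: we may have raced */
        if (p == NULL) {
                table_insert_locked(newp);
                p = newp;
                newp = NULL;
        }
        write_unlock_irqrestore(&table_lock, flags);

        if (newp != NULL)
                wpeer_free(newp);               /* lost the race */
        return p;
}

The second lookup under the write lock is what makes dropping the lock for allocation safe.
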
index f12be7d..07ddae4 100644 (file)
@@ -42,7 +42,7 @@ void
 kptllnd_rx_buffer_pool_init(kptl_rx_buffer_pool_t *rxbp)
 {
         memset(rxbp, 0, sizeof(*rxbp));
-        cfs_spin_lock_init(&rxbp->rxbp_lock);
+       spin_lock_init(&rxbp->rxbp_lock);
         CFS_INIT_LIST_HEAD(&rxbp->rxbp_list);
 }
 
@@ -78,7 +78,7 @@ kptllnd_rx_buffer_pool_reserve(kptl_rx_buffer_pool_t *rxbp, int count)
 
         CDEBUG(D_NET, "kptllnd_rx_buffer_pool_reserve(%d)\n", count);
 
-        cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+       spin_lock_irqsave(&rxbp->rxbp_lock, flags);
 
         for (;;) {
                 if (rxbp->rxbp_shutdown) {
@@ -92,7 +92,7 @@ kptllnd_rx_buffer_pool_reserve(kptl_rx_buffer_pool_t *rxbp, int count)
                         break;
                 }
                 
-                cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+               spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
                 
                 LIBCFS_ALLOC(rxb, sizeof(*rxb));
                 LIBCFS_ALLOC(buffer, bufsize);
@@ -105,7 +105,7 @@ kptllnd_rx_buffer_pool_reserve(kptl_rx_buffer_pool_t *rxbp, int count)
                         if (buffer != NULL)
                                 LIBCFS_FREE(buffer, bufsize);
                         
-                        cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+                       spin_lock_irqsave(&rxbp->rxbp_lock, flags);
                         rc = -ENOMEM;
                         break;
                 }
@@ -120,15 +120,15 @@ kptllnd_rx_buffer_pool_reserve(kptl_rx_buffer_pool_t *rxbp, int count)
                 rxb->rxb_buffer = buffer;
                 rxb->rxb_mdh = PTL_INVALID_HANDLE;
 
-                cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+               spin_lock_irqsave(&rxbp->rxbp_lock, flags);
                 
                 if (rxbp->rxbp_shutdown) {
-                        cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+                       spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
                         
                         LIBCFS_FREE(rxb, sizeof(*rxb));
                         LIBCFS_FREE(buffer, bufsize);
 
-                        cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+                       spin_lock_irqsave(&rxbp->rxbp_lock, flags);
                         rc = -ESHUTDOWN;
                         break;
                 }
@@ -136,17 +136,17 @@ kptllnd_rx_buffer_pool_reserve(kptl_rx_buffer_pool_t *rxbp, int count)
                 cfs_list_add_tail(&rxb->rxb_list, &rxbp->rxbp_list);
                 rxbp->rxbp_count++;
 
-                cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+               spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
                 
                 kptllnd_rx_buffer_post(rxb);
 
-                cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+               spin_lock_irqsave(&rxbp->rxbp_lock, flags);
         }
 
         if (rc == 0)
                 rxbp->rxbp_reserved += count;
 
-        cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+       spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
 
         return rc;
 }
@@ -157,12 +157,12 @@ kptllnd_rx_buffer_pool_unreserve(kptl_rx_buffer_pool_t *rxbp,
 {
         unsigned long flags;
 
-        cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+       spin_lock_irqsave(&rxbp->rxbp_lock, flags);
 
         CDEBUG(D_NET, "kptllnd_rx_buffer_pool_unreserve(%d)\n", count);
         rxbp->rxbp_reserved -= count;
 
-        cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+       spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
 }
 
 void
@@ -187,7 +187,7 @@ kptllnd_rx_buffer_pool_fini(kptl_rx_buffer_pool_t *rxbp)
          * different MD) from when the MD is actually unlinked, to when the
          * event callback tells me it has been unlinked. */
 
-        cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+       spin_lock_irqsave(&rxbp->rxbp_lock, flags);
 
         rxbp->rxbp_shutdown = 1;
 
@@ -196,10 +196,10 @@ kptllnd_rx_buffer_pool_fini(kptl_rx_buffer_pool_t *rxbp)
                         rxb = cfs_list_entry (tmp, kptl_rx_buffer_t, rxb_list);
 
                         if (rxb->rxb_idle) {
-                                cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock,
-                                                           flags);
+                               spin_unlock_irqrestore(&rxbp->rxbp_lock,
+                                                      flags);
                                 kptllnd_rx_buffer_destroy(rxb);
-                                cfs_spin_lock_irqsave(&rxbp->rxbp_lock,
-                                                      flags);
+                               spin_lock_irqsave(&rxbp->rxbp_lock,
+                                                 flags);
                                 continue;
                         }
@@ -208,11 +208,11 @@ kptllnd_rx_buffer_pool_fini(kptl_rx_buffer_pool_t *rxbp)
                         if (PtlHandleIsEqual(mdh, PTL_INVALID_HANDLE))
                                 continue;
                         
-                        cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+                       spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
 
                         rc = PtlMDUnlink(mdh);
 
-                        cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+                       spin_lock_irqsave(&rxbp->rxbp_lock, flags);
                         
 #ifdef LUSTRE_PORTALS_UNLINK_SEMANTICS
                         /* callback clears rxb_mdh and drops net's ref
@@ -231,7 +231,7 @@ kptllnd_rx_buffer_pool_fini(kptl_rx_buffer_pool_t *rxbp)
                 if (cfs_list_empty(&rxbp->rxbp_list))
                         break;
 
-                cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+               spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
 
                 /* Wait a bit for references to be dropped */
                 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
@@ -240,10 +240,10 @@ kptllnd_rx_buffer_pool_fini(kptl_rx_buffer_pool_t *rxbp)
 
                 cfs_pause(cfs_time_seconds(1));
 
-                cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+               spin_lock_irqsave(&rxbp->rxbp_lock, flags);
         }
 
-        cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+       spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
 }
 
 void
@@ -266,18 +266,18 @@ kptllnd_rx_buffer_post(kptl_rx_buffer_t *rxb)
         any.nid = PTL_NID_ANY;
         any.pid = PTL_PID_ANY;
 
-        cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+       spin_lock_irqsave(&rxbp->rxbp_lock, flags);
 
         if (rxbp->rxbp_shutdown) {
                 rxb->rxb_idle = 1;
-                cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+               spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
                 return;
         }
 
         rxb->rxb_refcount = 1;                  /* net's ref */
         rxb->rxb_posted = 1;                    /* I'm posting */
         
-        cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+       spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
 
         rc = PtlMEAttach(kptllnd_data.kptl_nih,
                          *kptllnd_tunables.kptl_portal,
@@ -310,10 +310,10 @@ kptllnd_rx_buffer_post(kptl_rx_buffer_t *rxb)
 
         rc = PtlMDAttach(meh, md, PTL_UNLINK, &mdh);
         if (rc == PTL_OK) {
-                cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+               spin_lock_irqsave(&rxbp->rxbp_lock, flags);
                 if (rxb->rxb_posted)            /* Not auto-unlinked yet!!! */
                         rxb->rxb_mdh = mdh;
-                cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+               spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
                 return;
         }
         
@@ -323,11 +323,11 @@ kptllnd_rx_buffer_post(kptl_rx_buffer_t *rxb)
         LASSERT(rc == PTL_OK);
 
  failed:
-        cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+       spin_lock_irqsave(&rxbp->rxbp_lock, flags);
         rxb->rxb_posted = 0;
         /* XXX this will just try again immediately */
         kptllnd_rx_buffer_decref_locked(rxb);
-        cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+       spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
 }
 
 kptl_rx_t *
@@ -367,7 +367,7 @@ kptllnd_rx_done(kptl_rx_t *rx, int post_credit)
 
         if (peer != NULL) {
                 /* Update credits (after I've decref-ed the buffer) */
-                cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+               spin_lock_irqsave(&peer->peer_lock, flags);
 
                 if (post_credit == PTLLND_POSTRX_PEER_CREDIT)
                         peer->peer_outstanding_credits++;
@@ -381,7 +381,7 @@ kptllnd_rx_done(kptl_rx_t *rx, int post_credit)
                        peer->peer_outstanding_credits, peer->peer_sent_credits,
                        rx);
 
-                cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+               spin_unlock_irqrestore(&peer->peer_lock, flags);
 
                 /* I might have to send back credits */
                 kptllnd_peer_check_sends(peer);
@@ -475,26 +475,26 @@ kptllnd_rx_buffer_callback (ptl_event_t *ev)
                         rx->rx_initiator = ev->initiator;
                         rx->rx_treceived = jiffies;
                         /* Queue for attention */
-                        cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
-                                              flags);
+                       spin_lock_irqsave(&kptllnd_data.kptl_sched_lock,
+                                         flags);
 
                         cfs_list_add_tail(&rx->rx_list,
                                           &kptllnd_data.kptl_sched_rxq);
                         cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
 
-                        cfs_spin_unlock_irqrestore(&kptllnd_data. \
-                                                   kptl_sched_lock, flags);
+                       spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
+                                              flags);
                 }
         }
 
         if (unlinked) {
-                cfs_spin_lock_irqsave(&rxbp->rxbp_lock, flags);
+               spin_lock_irqsave(&rxbp->rxbp_lock, flags);
 
                 rxb->rxb_posted = 0;
                 rxb->rxb_mdh = PTL_INVALID_HANDLE;
                 kptllnd_rx_buffer_decref_locked(rxb);
 
-                cfs_spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
+               spin_unlock_irqrestore(&rxbp->rxbp_lock, flags);
         }
 }
 
@@ -535,17 +535,17 @@ kptllnd_find_net (lnet_nid_t nid)
 {
         kptl_net_t *net;
 
-        cfs_read_lock(&kptllnd_data.kptl_net_rw_lock);
+       read_lock(&kptllnd_data.kptl_net_rw_lock);
         cfs_list_for_each_entry (net, &kptllnd_data.kptl_nets, net_list) {
                 LASSERT (!net->net_shutdown);
 
                 if (net->net_ni->ni_nid == nid) {
                         kptllnd_net_addref(net);
-                        cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
+                       read_unlock(&kptllnd_data.kptl_net_rw_lock);
                         return net;
                 }
         }
-        cfs_read_unlock(&kptllnd_data.kptl_net_rw_lock);
+       read_unlock(&kptllnd_data.kptl_net_rw_lock);
 
         return NULL;
 }
@@ -684,9 +684,9 @@ kptllnd_rx_parse(kptl_rx_t *rx)
 
                 if (peer->peer_state == PEER_STATE_WAITING_HELLO) {
                         /* recoverable error - restart txs */
-                        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+                       spin_lock_irqsave(&peer->peer_lock, flags);
                         kptllnd_cancel_txlist(&peer->peer_sendq, &txs);
-                        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+                       spin_unlock_irqrestore(&peer->peer_lock, flags);
 
                         CWARN("NAK %s: Unexpected %s message\n",
                               libcfs_id2str(srcid),
@@ -714,7 +714,7 @@ kptllnd_rx_parse(kptl_rx_t *rx)
         LASSERTF (msg->ptlm_srcpid == peer->peer_id.pid, "m %u p %u\n",
                   msg->ptlm_srcpid, peer->peer_id.pid);
 
-        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+       spin_lock_irqsave(&peer->peer_lock, flags);
 
         /* Check peer only sends when I've sent her credits */
         if (peer->peer_sent_credits == 0) {
@@ -722,7 +722,7 @@ kptllnd_rx_parse(kptl_rx_t *rx)
                 int oc = peer->peer_outstanding_credits;
                 int sc = peer->peer_sent_credits;
 
-                cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+               spin_unlock_irqrestore(&peer->peer_lock, flags);
 
                 CERROR("%s: buffer overrun [%d/%d+%d]\n",
                        libcfs_id2str(peer->peer_id), c, sc, oc);
@@ -741,7 +741,7 @@ kptllnd_rx_parse(kptl_rx_t *rx)
                 post_credit = PTLLND_POSTRX_NO_CREDIT;
         }
 
-        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+       spin_unlock_irqrestore(&peer->peer_lock, flags);
 
         /* See if something can go out now that credits have come in */
         if (msg->ptlm_credits != 0)
@@ -788,14 +788,14 @@ kptllnd_rx_parse(kptl_rx_t *rx)
                          PTL_RESERVED_MATCHBITS);
 
                 /* Update last match bits seen */
-                cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+               spin_lock_irqsave(&peer->peer_lock, flags);
 
                 if (msg->ptlm_u.rdma.kptlrm_matchbits >
                     rx->rx_peer->peer_last_matchbits_seen)
                         rx->rx_peer->peer_last_matchbits_seen =
                                 msg->ptlm_u.rdma.kptlrm_matchbits;
 
-                cfs_spin_unlock_irqrestore(&rx->rx_peer->peer_lock, flags);
+               spin_unlock_irqrestore(&rx->rx_peer->peer_lock, flags);
 
                 rc = lnet_parse(net->net_ni,
                                 &msg->ptlm_u.rdma.kptlrm_hdr,
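
The rx buffer pool in this file illustrates why the _irqsave sections must stay atomic: rxbp_lock is dropped around LIBCFS_ALLOC() (which may sleep) and rxbp_shutdown is re-tested once the lock is retaken, because shutdown can start while the lock is not held. A sketch of the same shape with plain kmalloc() and hypothetical pool/buf types:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct buf {
        struct list_head b_list;
};

struct pool {
        spinlock_t       po_lock;
        struct list_head po_list;
        int              po_count;
        int              po_reserved;
        int              po_shutdown;
};

static int pool_reserve(struct pool *po, int count)
{
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&po->po_lock, flags);
        while (po->po_count < po->po_reserved + count) {
                struct buf *b;

                if (po->po_shutdown) {
                        rc = -ESHUTDOWN;
                        break;
                }

                /* GFP_KERNEL can sleep: never allocate under the lock */
                spin_unlock_irqrestore(&po->po_lock, flags);
                b = kmalloc(sizeof(*b), GFP_KERNEL);
                spin_lock_irqsave(&po->po_lock, flags);

                if (b == NULL) {
                        rc = -ENOMEM;
                        break;
                }
                if (po->po_shutdown) {          /* world changed while unlocked */
                        kfree(b);
                        rc = -ESHUTDOWN;
                        break;
                }
                list_add_tail(&b->b_list, &po->po_list);
                po->po_count++;
        }
        if (rc == 0)
                po->po_reserved += count;
        spin_unlock_irqrestore(&po->po_lock, flags);
        return rc;
}
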
index 17b1d5b..eaef266 100644 (file)
@@ -110,9 +110,9 @@ kptllnd_setup_tx_descs()
                 if (tx == NULL)
                         return -ENOMEM;
 
-                cfs_spin_lock(&kptllnd_data.kptl_tx_lock);
+               spin_lock(&kptllnd_data.kptl_tx_lock);
                 cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_idle_txs);
-                cfs_spin_unlock(&kptllnd_data.kptl_tx_lock);
+               spin_unlock(&kptllnd_data.kptl_tx_lock);
         }
 
         return 0;
@@ -159,10 +159,10 @@ kptllnd_get_idle_tx(enum kptl_tx_type type)
                 return NULL;
         }
 
-        cfs_spin_lock(&kptllnd_data.kptl_tx_lock);
+       spin_lock(&kptllnd_data.kptl_tx_lock);
 
         if (cfs_list_empty (&kptllnd_data.kptl_idle_txs)) {
-                cfs_spin_unlock(&kptllnd_data.kptl_tx_lock);
+               spin_unlock(&kptllnd_data.kptl_tx_lock);
 
                 tx = kptllnd_alloc_tx();
                 if (tx == NULL)
@@ -172,7 +172,7 @@ kptllnd_get_idle_tx(enum kptl_tx_type type)
                                     kptl_tx_t, tx_list);
                 cfs_list_del(&tx->tx_list);
 
-                cfs_spin_unlock(&kptllnd_data.kptl_tx_lock);
+               spin_unlock(&kptllnd_data.kptl_tx_lock);
         }
 
         LASSERT (cfs_atomic_read(&tx->tx_refcount)== 0);
@@ -207,14 +207,14 @@ kptllnd_tx_abort_netio(kptl_tx_t *tx)
         LASSERT (cfs_atomic_read(&tx->tx_refcount) == 0);
         LASSERT (!tx->tx_active);
 
-        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+       spin_lock_irqsave(&peer->peer_lock, flags);
 
         msg_mdh = tx->tx_msg_mdh;
         rdma_mdh = tx->tx_rdma_mdh;
 
         if (PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE) &&
             PtlHandleIsEqual(rdma_mdh, PTL_INVALID_HANDLE)) {
-                cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+               spin_unlock_irqrestore(&peer->peer_lock, flags);
                 return 0;
         }
         
@@ -229,7 +229,7 @@ kptllnd_tx_abort_netio(kptl_tx_t *tx)
         tx->tx_active = 1;
         cfs_list_add_tail(&tx->tx_list, &peer->peer_activeq);
         
-        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+       spin_unlock_irqrestore(&peer->peer_lock, flags);
 
         /* These unlinks will ensure completion events (normal or unlink) will
          * happen ASAP */
@@ -255,14 +255,14 @@ kptllnd_tx_abort_netio(kptl_tx_t *tx)
         LASSERT (cfs_atomic_read(&tx->tx_refcount) == 0);
         LASSERT (!tx->tx_active);
 
-        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+       spin_lock_irqsave(&peer->peer_lock, flags);
 
         msg_mdh = tx->tx_msg_mdh;
         rdma_mdh = tx->tx_rdma_mdh;
 
         if (PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE) &&
             PtlHandleIsEqual(rdma_mdh, PTL_INVALID_HANDLE)) {
-                cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+               spin_unlock_irqrestore(&peer->peer_lock, flags);
                 return 0;
         }
         
@@ -272,7 +272,7 @@ kptllnd_tx_abort_netio(kptl_tx_t *tx)
                  (tx->tx_lnet_msg == NULL && 
                   tx->tx_replymsg == NULL));
 
-        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+       spin_unlock_irqrestore(&peer->peer_lock, flags);
 
         if (!PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE)) {
                 prc = PtlMDUnlink(msg_mdh);
@@ -286,7 +286,7 @@ kptllnd_tx_abort_netio(kptl_tx_t *tx)
                         rdma_mdh = PTL_INVALID_HANDLE;
         }
 
-        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+       spin_lock_irqsave(&peer->peer_lock, flags);
 
         /* update tx_???_mdh if callback hasn't fired */
         if (PtlHandleIsEqual(tx->tx_msg_mdh, PTL_INVALID_HANDLE))
@@ -301,7 +301,7 @@ kptllnd_tx_abort_netio(kptl_tx_t *tx)
 
         if (PtlHandleIsEqual(msg_mdh, PTL_INVALID_HANDLE) &&
             PtlHandleIsEqual(rdma_mdh, PTL_INVALID_HANDLE)) {
-                cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+               spin_unlock_irqrestore(&peer->peer_lock, flags);
                 return 0;
         }
 
@@ -312,7 +312,7 @@ kptllnd_tx_abort_netio(kptl_tx_t *tx)
 
         kptllnd_peer_addref(peer);              /* extra ref for me... */
 
-        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+       spin_unlock_irqrestore(&peer->peer_lock, flags);
 
         /* This will get the watchdog thread to try aborting all the peer's
          * comms again.  NB, this deems it fair that 1 failing tx which can't
@@ -355,9 +355,9 @@ kptllnd_tx_fini (kptl_tx_t *tx)
         tx->tx_peer = NULL;
         tx->tx_idle = 1;
 
-        cfs_spin_lock(&kptllnd_data.kptl_tx_lock);
+       spin_lock(&kptllnd_data.kptl_tx_lock);
         cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_idle_txs);
-        cfs_spin_unlock(&kptllnd_data.kptl_tx_lock);
+       spin_unlock(&kptllnd_data.kptl_tx_lock);
 
         /* Must finalize AFTER freeing 'tx' */
         if (msg != NULL)
@@ -491,7 +491,7 @@ kptllnd_tx_callback(ptl_event_t *ev)
         if (!unlinked)
                 return;
 
-        cfs_spin_lock_irqsave(&peer->peer_lock, flags);
+       spin_lock_irqsave(&peer->peer_lock, flags);
 
         if (ismsg)
                 tx->tx_msg_mdh = PTL_INVALID_HANDLE;
@@ -501,24 +501,24 @@ kptllnd_tx_callback(ptl_event_t *ev)
         if (!PtlHandleIsEqual(tx->tx_msg_mdh, PTL_INVALID_HANDLE) ||
             !PtlHandleIsEqual(tx->tx_rdma_mdh, PTL_INVALID_HANDLE) ||
             !tx->tx_active) {
-                cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+               spin_unlock_irqrestore(&peer->peer_lock, flags);
                 return;
         }
 
         cfs_list_del(&tx->tx_list);
         tx->tx_active = 0;
 
-        cfs_spin_unlock_irqrestore(&peer->peer_lock, flags);
+       spin_unlock_irqrestore(&peer->peer_lock, flags);
 
         /* drop peer's ref, but if it was the last one... */
         if (cfs_atomic_dec_and_test(&tx->tx_refcount)) {
                 /* ...finalize it in thread context! */
-                cfs_spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
+               spin_lock_irqsave(&kptllnd_data.kptl_sched_lock, flags);
 
                 cfs_list_add_tail(&tx->tx_list, &kptllnd_data.kptl_sched_txq);
                 cfs_waitq_signal(&kptllnd_data.kptl_sched_waitq);
 
-                cfs_spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
-                                            flags);
+               spin_unlock_irqrestore(&kptllnd_data.kptl_sched_lock,
+                                      flags);
         }
 }
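
Note that kptl_tx_lock is converted to plain spin_lock()/spin_unlock() with no irqsave: this lock is only ever taken from process context. A freelist sketch under that assumption; the tx type and helper names are hypothetical:

#include <linux/list.h>
#include <linux/spinlock.h>

struct tx {
        struct list_head t_list;
};

static LIST_HEAD(idle_txs);
static DEFINE_SPINLOCK(tx_lock);        /* process context only: no irqsave */

static struct tx *get_idle_tx(void)
{
        struct tx *t = NULL;

        spin_lock(&tx_lock);
        if (!list_empty(&idle_txs)) {
                t = list_entry(idle_txs.next, struct tx, t_list);
                list_del(&t->t_list);
        }
        spin_unlock(&tx_lock);
        return t;               /* NULL: caller allocates outside the lock */
}

static void put_idle_tx(struct tx *t)
{
        spin_lock(&tx_lock);
        list_add_tail(&t->t_list, &idle_txs);
        spin_unlock(&tx_lock);
}
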
index d4b543c..1c8298e 100644 (file)
@@ -61,7 +61,7 @@ kqswnal_get_tx_desc (struct libcfs_ioctl_data *data)
        int                index = data->ioc_count;
        int                rc = -ENOENT;
 
-       cfs_spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
+       spin_lock_irqsave(&kqswnal_data.kqn_idletxd_lock, flags);
 
        cfs_list_for_each (tmp, &kqswnal_data.kqn_activetxds) {
                if (index-- != 0)
@@ -82,7 +82,7 @@ kqswnal_get_tx_desc (struct libcfs_ioctl_data *data)
                break;
        }
 
-       cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
+       spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
        return (rc);
 }
 
@@ -136,9 +136,9 @@ kqswnal_shutdown(lnet_ni_t *ni)
 
        /**********************************************************************/
        /* Signal the start of shutdown... */
-       cfs_spin_lock_irqsave(&kqswnal_data.kqn_idletxd_lock, flags);
+       spin_lock_irqsave(&kqswnal_data.kqn_idletxd_lock, flags);
        kqswnal_data.kqn_shuttingdown = 1;
-       cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
+       spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
 
        /**********************************************************************/
        /* wait for sends that have allocated a tx desc to launch or give up */
@@ -298,13 +298,13 @@ kqswnal_startup (lnet_ni_t *ni)
 
        CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_idletxds);
        CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_activetxds);
-       cfs_spin_lock_init (&kqswnal_data.kqn_idletxd_lock);
+       spin_lock_init(&kqswnal_data.kqn_idletxd_lock);
 
        CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_delayedtxds);
        CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_donetxds);
        CFS_INIT_LIST_HEAD (&kqswnal_data.kqn_readyrxds);
 
-       cfs_spin_lock_init (&kqswnal_data.kqn_sched_lock);
+       spin_lock_init(&kqswnal_data.kqn_sched_lock);
        cfs_waitq_init (&kqswnal_data.kqn_sched_waitq);
 
        /* pointers/lists/locks initialised */
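
Startup paths like the one above now call the bare kernel initialisers; only the locking wrappers are removed by this patch, while cfs_waitq_init() and the cfs_list wrappers remain for later cleanups. The equivalent in plain kernel API, as a hypothetical init helper:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct sched_data {
        struct list_head  sd_readyq;
        spinlock_t        sd_lock;
        wait_queue_head_t sd_waitq;
};

static void sched_data_init(struct sched_data *sd)
{
        INIT_LIST_HEAD(&sd->sd_readyq);
        spin_lock_init(&sd->sd_lock);           /* was cfs_spin_lock_init() */
        init_waitqueue_head(&sd->sd_waitq);     /* cfs_waitq_init() in this tree */
}
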
index 14a2845..aa6b3de 100644 (file)
@@ -262,10 +262,10 @@ typedef struct
 
         cfs_list_t           kqn_idletxds;    /* transmit descriptors free to use */
         cfs_list_t           kqn_activetxds;  /* transmit descriptors being used */
-        cfs_spinlock_t       kqn_idletxd_lock; /* serialise idle txd access */
-        cfs_atomic_t         kqn_pending_txs;/* # transmits being prepped */
+       spinlock_t      kqn_idletxd_lock;    /* serialise idle txd access */
+       cfs_atomic_t    kqn_pending_txs;     /* # transmits being prepped */
 
-        cfs_spinlock_t       kqn_sched_lock; /* serialise packet schedulers */
+       spinlock_t      kqn_sched_lock;      /* serialise packet schedulers */
         cfs_waitq_t          kqn_sched_waitq;/* scheduler blocks here */
 
         cfs_list_t           kqn_readyrxds;  /* rxds full of data */
index c34ba6d..868436c 100644 (file)
@@ -350,17 +350,17 @@ kqswnal_csum_iov (__u32 csum, int offset, int nob,
 void
 kqswnal_put_idle_tx (kqswnal_tx_t *ktx)
 {
-        unsigned long     flags;
+       unsigned long     flags;
 
-        kqswnal_unmap_tx (ktx);                 /* release temporary mappings */
-        ktx->ktx_state = KTX_IDLE;
+       kqswnal_unmap_tx(ktx);                  /* release temporary mappings */
+       ktx->ktx_state = KTX_IDLE;
 
-        cfs_spin_lock_irqsave (&kqswnal_data.kqn_idletxd_lock, flags);
+       spin_lock_irqsave(&kqswnal_data.kqn_idletxd_lock, flags);
 
-        cfs_list_del (&ktx->ktx_list);              /* take off active list */
-        cfs_list_add (&ktx->ktx_list, &kqswnal_data.kqn_idletxds);
+       cfs_list_del(&ktx->ktx_list);           /* take off active list */
+       cfs_list_add(&ktx->ktx_list, &kqswnal_data.kqn_idletxds);
 
-        cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
+       spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
 }
 
 kqswnal_tx_t *
@@ -387,7 +387,7 @@ kqswnal_get_idle_tx (void)
         ktx->ktx_launcher = current->pid;
         cfs_atomic_inc(&kqswnal_data.kqn_pending_txs);
 
-        cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_idletxd_lock, flags);
+       spin_unlock_irqrestore(&kqswnal_data.kqn_idletxd_lock, flags);
 
         /* Idle descs can't have any mapped (as opposed to pre-mapped) pages */
         LASSERT (ktx->ktx_nmappedpages == 0);
@@ -513,13 +513,13 @@ kqswnal_tx_done (kqswnal_tx_t *ktx, int status)
         }
 
         /* Complete the send in thread context */
-        cfs_spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
+       spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
 
-        cfs_list_add_tail(&ktx->ktx_schedlist,
-                          &kqswnal_data.kqn_donetxds);
-        cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
+       cfs_list_add_tail(&ktx->ktx_schedlist,
+                          &kqswnal_data.kqn_donetxds);
+       cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
 
-        cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
+       spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
 }
 
 static void
@@ -664,13 +664,13 @@ kqswnal_launch (kqswnal_tx_t *ktx)
                 return (0);
 
         case EP_ENOMEM: /* can't allocate ep txd => queue for later */
-                cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
+               spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
 
-                cfs_list_add_tail (&ktx->ktx_schedlist,
-                                   &kqswnal_data.kqn_delayedtxds);
-                cfs_waitq_signal (&kqswnal_data.kqn_sched_waitq);
+               cfs_list_add_tail(&ktx->ktx_schedlist,
+                                 &kqswnal_data.kqn_delayedtxds);
+               cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
 
-                cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock,
-                                            flags);
+               spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
+                                      flags);
                 return (0);
 
@@ -1538,12 +1538,12 @@ kqswnal_rxhandler(EP_RXD *rxd)
                 return;
         }
 
-        cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
+       spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
 
-        cfs_list_add_tail (&krx->krx_list, &kqswnal_data.kqn_readyrxds);
-        cfs_waitq_signal (&kqswnal_data.kqn_sched_waitq);
+       cfs_list_add_tail(&krx->krx_list, &kqswnal_data.kqn_readyrxds);
+       cfs_waitq_signal(&kqswnal_data.kqn_sched_waitq);
 
-        cfs_spin_unlock_irqrestore (&kqswnal_data.kqn_sched_lock, flags);
+       spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock, flags);
 }
 
 int
@@ -1684,7 +1684,7 @@ kqswnal_scheduler (void *arg)
         cfs_daemonize ("kqswnal_sched");
         cfs_block_allsigs ();
 
-        cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock, flags);
+       spin_lock_irqsave(&kqswnal_data.kqn_sched_lock, flags);
 
         for (;;)
         {
@@ -1695,14 +1695,14 @@ kqswnal_scheduler (void *arg)
                         krx = cfs_list_entry(kqswnal_data.kqn_readyrxds.next,
                                              kqswnal_rx_t, krx_list);
                         cfs_list_del (&krx->krx_list);
-                        cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
-                                                   flags);
+                       spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
+                                              flags);
 
                         LASSERT (krx->krx_state == KRX_PARSE);
                         kqswnal_parse (krx);
 
                         did_something = 1;
-                        cfs_spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
-                                              flags);
+                       spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
+                                         flags);
                 }
 
@@ -1711,13 +1711,13 @@ kqswnal_scheduler (void *arg)
                         ktx = cfs_list_entry(kqswnal_data.kqn_donetxds.next,
                                              kqswnal_tx_t, ktx_schedlist);
                         cfs_list_del_init (&ktx->ktx_schedlist);
-                        cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
-                                                   flags);
+                       spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
+                                              flags);
 
                         kqswnal_tx_done_in_thread_context(ktx);
 
                         did_something = 1;
-                        cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock,
-                                               flags);
+                       spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
+                                         flags);
                 }
 
@@ -1726,7 +1726,7 @@ kqswnal_scheduler (void *arg)
                         ktx = cfs_list_entry(kqswnal_data.kqn_delayedtxds.next,
                                              kqswnal_tx_t, ktx_schedlist);
                         cfs_list_del_init (&ktx->ktx_schedlist);
-                        cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
-                                                   flags);
+                       spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
+                                              flags);
 
                         rc = kqswnal_launch (ktx);
@@ -1738,13 +1738,13 @@ kqswnal_scheduler (void *arg)
                         cfs_atomic_dec (&kqswnal_data.kqn_pending_txs);
 
                         did_something = 1;
-                        cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock,
-                                               flags);
+                       spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
+                                         flags);
                 }
 
                 /* nothing to do or hogging CPU */
                 if (!did_something || counter++ == KQSW_RESCHED) {
-                        cfs_spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
-                                                   flags);
+                       spin_unlock_irqrestore(&kqswnal_data.kqn_sched_lock,
+                                              flags);
 
                         counter = 0;
@@ -1768,7 +1768,7 @@ kqswnal_scheduler (void *arg)
                         } else if (need_resched())
                                 cfs_schedule ();
 
-                        cfs_spin_lock_irqsave (&kqswnal_data.kqn_sched_lock,
-                                               flags);
+                       spin_lock_irqsave(&kqswnal_data.kqn_sched_lock,
+                                         flags);
                 }
         }
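
kqswnal_scheduler() is the consumer half of a producer/consumer queue: producers such as kqswnal_rxhandler() can run in interrupt context, so every queue operation uses the _irqsave forms, and the lock covers only the list surgery, never the work. A condensed sketch, names hypothetical:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct rx_item {
        struct list_head ri_list;
};

extern spinlock_t sched_lock;
extern struct list_head readyq;
extern wait_queue_head_t sched_waitq;
extern void handle_rx(struct rx_item *ri);      /* heavy work, may sleep */

/* producer: callable from interrupt context, hence _irqsave */
static void enqueue_rx(struct rx_item *ri)
{
        unsigned long flags;

        spin_lock_irqsave(&sched_lock, flags);
        list_add_tail(&ri->ri_list, &readyq);
        wake_up(&sched_waitq);
        spin_unlock_irqrestore(&sched_lock, flags);
}

/* consumer: drop the lock for each unit of work, retake to pick the next */
static void sched_drain(void)
{
        unsigned long flags;

        spin_lock_irqsave(&sched_lock, flags);
        while (!list_empty(&readyq)) {
                struct rx_item *ri = list_entry(readyq.next,
                                                struct rx_item, ri_list);

                list_del(&ri->ri_list);
                spin_unlock_irqrestore(&sched_lock, flags);

                handle_rx(ri);

                spin_lock_irqsave(&sched_lock, flags);
        }
        spin_unlock_irqrestore(&sched_lock, flags);
}
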
index 664ec59..a7b22ea 100644 (file)
@@ -278,7 +278,7 @@ kranal_set_conn_uniqueness (kra_conn_t *conn)
 {
         unsigned long  flags;
 
-        cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+       write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
         conn->rac_my_connstamp = kranal_data.kra_connstamp++;
 
@@ -286,7 +286,7 @@ kranal_set_conn_uniqueness (kra_conn_t *conn)
                 conn->rac_cqid = kranal_data.kra_next_cqid++;
         } while (kranal_cqid2conn_locked(conn->rac_cqid) != NULL);
 
-        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+       write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 }
 
 int
@@ -309,7 +309,7 @@ kranal_create_conn(kra_conn_t **connp, kra_device_t *dev)
         CFS_INIT_LIST_HEAD(&conn->rac_fmaq);
         CFS_INIT_LIST_HEAD(&conn->rac_rdmaq);
         CFS_INIT_LIST_HEAD(&conn->rac_replyq);
-        cfs_spin_lock_init(&conn->rac_lock);
+       spin_lock_init(&conn->rac_lock);
 
         kranal_set_conn_uniqueness(conn);
 
@@ -416,12 +416,12 @@ kranal_close_conn (kra_conn_t *conn, int error)
         unsigned long    flags;
 
 
-        cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+       write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
         if (conn->rac_state == RANAL_CONN_ESTABLISHED)
                 kranal_close_conn_locked(conn, error);
 
-        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+       write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 }
 
 int
@@ -446,10 +446,10 @@ kranal_set_conn_params(kra_conn_t *conn, kra_connreq_t *connreq,
 
         /* Schedule conn on rad_new_conns */
         kranal_conn_addref(conn);
-        cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+       spin_lock_irqsave(&dev->rad_lock, flags);
         cfs_list_add_tail(&conn->rac_schedlist, &dev->rad_new_conns);
         cfs_waitq_signal(&dev->rad_waitq);
-        cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
+       spin_unlock_irqrestore(&dev->rad_lock, flags);
 
         rrc = RapkWaitToConnect(conn->rac_rihandle);
         if (rrc != RAP_SUCCESS) {
@@ -668,11 +668,11 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
                 if (rc != 0)
                         return rc;
 
-                cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+               write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
                 if (!kranal_peer_active(peer)) {
                         /* raced with peer getting unlinked */
-                        cfs_write_unlock_irqrestore(&kranal_data. \
-                                                    kra_global_lock,
-                                                    flags);
+                       write_unlock_irqrestore(&kranal_data.kra_global_lock,
+                                               flags);
                         kranal_conn_decref(conn);
@@ -698,7 +698,7 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
                         return -ENOMEM;
                 }
 
-                cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+               write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
                 peer2 = kranal_find_peer_locked(peer_nid);
                 if (peer2 == NULL) {
@@ -716,7 +716,7 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
          * this while holding the global lock, to synch with connection
          * destruction on NID change. */
         if (kranal_data.kra_ni->ni_nid != dst_nid) {
-                cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
-                                            flags);
+               write_unlock_irqrestore(&kranal_data.kra_global_lock,
+                                       flags);
 
                 CERROR("Stale/bad connection with %s: dst_nid %s, expected %s\n",
@@ -733,7 +733,7 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
         if (rc != 0) {
                 LASSERT (!cfs_list_empty(&peer->rap_conns));
                 LASSERT (cfs_list_empty(&peer->rap_tx_queue));
-                cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
-                                            flags);
+               write_unlock_irqrestore(&kranal_data.kra_global_lock,
+                                       flags);
                 CWARN("Not creating duplicate connection to %s: %d\n",
                       libcfs_nid2str(peer_nid), rc);
@@ -769,7 +769,7 @@ kranal_conn_handshake (struct socket *sock, kra_peer_t *peer)
 
         nstale = kranal_close_stale_conns_locked(peer, conn);
 
-        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+       write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 
         /* CAVEAT EMPTOR: passive peer can disappear NOW */
 
@@ -811,7 +811,7 @@ kranal_connect (kra_peer_t *peer)
         CDEBUG(D_NET, "Done handshake %s:%d \n", 
                libcfs_nid2str(peer->rap_nid), rc);
 
-        cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+       write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
         LASSERT (peer->rap_connecting);
         peer->rap_connecting = 0;
@@ -823,7 +823,7 @@ kranal_connect (kra_peer_t *peer)
 
                 peer->rap_reconnect_interval = 0; /* OK to reconnect at any time */
 
-                cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
-                                            flags);
+               write_unlock_irqrestore(&kranal_data.kra_global_lock,
+                                       flags);
                 return;
         }
@@ -843,7 +843,7 @@ kranal_connect (kra_peer_t *peer)
         cfs_list_add(&zombies, &peer->rap_tx_queue);
         cfs_list_del_init(&peer->rap_tx_queue);
 
-        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+       write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 
         if (cfs_list_empty(&zombies))
                 return;
@@ -888,12 +888,12 @@ kranal_accept (lnet_ni_t *ni, struct socket *sock)
 
         ras->ras_sock = sock;
 
-        cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+       spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
 
         cfs_list_add_tail(&ras->ras_list, &kranal_data.kra_connd_acceptq);
         cfs_waitq_signal(&kranal_data.kra_connd_waitq);
 
-        cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+       spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
         return 0;
 }
 
@@ -921,11 +921,11 @@ kranal_create_peer (kra_peer_t **peerp, lnet_nid_t nid)
 
         peer->rap_reconnect_interval = 0;       /* OK to connect at any time */
 
-        cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+       write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
         if (kranal_data.kra_nonewpeers) {
                 /* shutdown has started already */
-                cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
-                                            flags);
+               write_unlock_irqrestore(&kranal_data.kra_global_lock,
+                                       flags);
 
                 LIBCFS_FREE(peer, sizeof(*peer));
@@ -935,7 +935,7 @@ kranal_create_peer (kra_peer_t **peerp, lnet_nid_t nid)
 
         cfs_atomic_inc(&kranal_data.kra_npeers);
 
-        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+       write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 
         *peerp = peer;
         return 0;
@@ -994,11 +994,11 @@ kranal_find_peer (lnet_nid_t nid)
 {
         kra_peer_t     *peer;
 
-        cfs_read_lock(&kranal_data.kra_global_lock);
+       read_lock(&kranal_data.kra_global_lock);
         peer = kranal_find_peer_locked(nid);
         if (peer != NULL)                       /* +1 ref for caller? */
                 kranal_peer_addref(peer);
-        cfs_read_unlock(&kranal_data.kra_global_lock);
+       read_unlock(&kranal_data.kra_global_lock);
 
         return peer;
 }
@@ -1024,7 +1024,7 @@ kranal_get_peer_info (int index, lnet_nid_t *nidp, __u32 *ipp, int *portp,
         cfs_list_t        *ptmp;
         int                i;
 
-        cfs_read_lock(&kranal_data.kra_global_lock);
+       read_lock(&kranal_data.kra_global_lock);
 
         for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
 
@@ -1042,12 +1042,12 @@ kranal_get_peer_info (int index, lnet_nid_t *nidp, __u32 *ipp, int *portp,
                         *portp = peer->rap_port;
                         *persistencep = peer->rap_persistence;
 
-                        cfs_read_unlock(&kranal_data.kra_global_lock);
+                       read_unlock(&kranal_data.kra_global_lock);
                         return 0;
                 }
         }
 
-        cfs_read_unlock(&kranal_data.kra_global_lock);
+       read_unlock(&kranal_data.kra_global_lock);
         return -ENOENT;
 }
 
@@ -1066,7 +1066,7 @@ kranal_add_persistent_peer (lnet_nid_t nid, __u32 ip, int port)
         if (rc != 0)
                 return rc;
 
-        cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+       write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
         peer2 = kranal_find_peer_locked(nid);
         if (peer2 != NULL) {
@@ -1082,7 +1082,7 @@ kranal_add_persistent_peer (lnet_nid_t nid, __u32 ip, int port)
         peer->rap_port = port;
         peer->rap_persistence++;
 
-        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+       write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
         return 0;
 }
 
@@ -1119,7 +1119,7 @@ kranal_del_peer (lnet_nid_t nid)
         int                i;
         int                rc = -ENOENT;
 
-        cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+       write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
         if (nid != LNET_NID_ANY)
                 lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
@@ -1142,7 +1142,7 @@ kranal_del_peer (lnet_nid_t nid)
                 }
         }
 
-        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+       write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 
         return rc;
 }
@@ -1156,7 +1156,7 @@ kranal_get_conn_by_idx (int index)
         cfs_list_t        *ctmp;
         int                i;
 
-        cfs_read_lock (&kranal_data.kra_global_lock);
+       read_lock(&kranal_data.kra_global_lock);
 
         for (i = 0; i < kranal_data.kra_peer_hash_size; i++) {
                 cfs_list_for_each (ptmp, &kranal_data.kra_peers[i]) {
@@ -1175,13 +1175,13 @@ kranal_get_conn_by_idx (int index)
                                        libcfs_nid2str(conn->rac_peer->rap_nid),
                                        cfs_atomic_read(&conn->rac_refcount));
                                 cfs_atomic_inc(&conn->rac_refcount);
-                                cfs_read_unlock(&kranal_data.kra_global_lock);
+                               read_unlock(&kranal_data.kra_global_lock);
                                 return conn;
                         }
                 }
         }
 
-        cfs_read_unlock(&kranal_data.kra_global_lock);
+       read_unlock(&kranal_data.kra_global_lock);
         return NULL;
 }
 
@@ -1215,7 +1215,7 @@ kranal_close_matching_conns (lnet_nid_t nid)
         int                 i;
         int                 count = 0;
 
-        cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+       write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
         if (nid != LNET_NID_ANY)
                 lo = hi = kranal_nid2peerlist(nid) - kranal_data.kra_peers;
@@ -1238,7 +1238,7 @@ kranal_close_matching_conns (lnet_nid_t nid)
                 }
         }
 
-        cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
+       write_unlock_irqrestore(&kranal_data.kra_global_lock, flags);
 
         /* wildcards always succeed */
         if (nid == LNET_NID_ANY)
@@ -1448,9 +1448,9 @@ kranal_shutdown (lnet_ni_t *ni)
 
         case RANAL_INIT_ALL:
                 /* Prevent new peers from being created */
-                cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+               write_lock_irqsave(&kranal_data.kra_global_lock, flags);
                 kranal_data.kra_nonewpeers = 1;
-                cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
-                                            flags);
+               write_unlock_irqrestore(&kranal_data.kra_global_lock,
+                                       flags);
 
                 /* Remove all existing peers from the peer table */
@@ -1458,19 +1458,19 @@ kranal_shutdown (lnet_ni_t *ni)
 
                 /* Wait for pending conn reqs to be handled */
                 i = 2;
-                cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+               spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
                 while (!cfs_list_empty(&kranal_data.kra_connd_acceptq)) {
-                        cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
-                                                   flags);
+                       spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
+                                              flags);
                         i++;
                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n */
                                "waiting for conn reqs to clean up\n");
                         cfs_pause(cfs_time_seconds(1));
 
-                        cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock,
-                                              flags);
+                       spin_lock_irqsave(&kranal_data.kra_connd_lock,
+                                         flags);
                 }
-                cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+               spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
 
                 /* Wait for all peers to be freed */
                 i = 2;
@@ -1500,19 +1500,19 @@ kranal_shutdown (lnet_ni_t *ni)
         for (i = 0; i < kranal_data.kra_ndevs; i++) {
                 kra_device_t *dev = &kranal_data.kra_devices[i];
 
-                cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+               spin_lock_irqsave(&dev->rad_lock, flags);
                 cfs_waitq_signal(&dev->rad_waitq);
-                cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
+               spin_unlock_irqrestore(&dev->rad_lock, flags);
         }
 
-        cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
+       spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
         cfs_waitq_broadcast(&kranal_data.kra_reaper_waitq);
-        cfs_spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
+       spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
 
         LASSERT (cfs_list_empty(&kranal_data.kra_connd_peers));
-        cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+       spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
         cfs_waitq_broadcast(&kranal_data.kra_connd_waitq);
-        cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+       spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
 
         /* Wait for threads to exit */
         i = 2;
@@ -1603,7 +1603,7 @@ kranal_startup (lnet_ni_t *ni)
         kranal_data.kra_connstamp =
         kranal_data.kra_peerstamp = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
 
-        cfs_rwlock_init(&kranal_data.kra_global_lock);
+       rwlock_init(&kranal_data.kra_global_lock);
 
         for (i = 0; i < RANAL_MAXDEVS; i++ ) {
                 kra_device_t  *dev = &kranal_data.kra_devices[i];
@@ -1612,20 +1612,20 @@ kranal_startup (lnet_ni_t *ni)
                 CFS_INIT_LIST_HEAD(&dev->rad_ready_conns);
                 CFS_INIT_LIST_HEAD(&dev->rad_new_conns);
                 cfs_waitq_init(&dev->rad_waitq);
-                cfs_spin_lock_init(&dev->rad_lock);
+               spin_lock_init(&dev->rad_lock);
         }
 
         kranal_data.kra_new_min_timeout = CFS_MAX_SCHEDULE_TIMEOUT;
         cfs_waitq_init(&kranal_data.kra_reaper_waitq);
-        cfs_spin_lock_init(&kranal_data.kra_reaper_lock);
+       spin_lock_init(&kranal_data.kra_reaper_lock);
 
         CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_acceptq);
         CFS_INIT_LIST_HEAD(&kranal_data.kra_connd_peers);
         cfs_waitq_init(&kranal_data.kra_connd_waitq);
-        cfs_spin_lock_init(&kranal_data.kra_connd_lock);
+       spin_lock_init(&kranal_data.kra_connd_lock);
 
         CFS_INIT_LIST_HEAD(&kranal_data.kra_idle_txs);
-        cfs_spin_lock_init(&kranal_data.kra_tx_lock);
+       spin_lock_init(&kranal_data.kra_tx_lock);
 
         /* OK to call kranal_api_shutdown() to cleanup now */
         kranal_data.kra_init = RANAL_INIT_DATA;
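
The startup hunk above shows the initialization side of the conversion. Below is a minimal sketch, under hypothetical names, of run-time initialization of embedded kernel locks; locks buried in dynamically set-up data need spin_lock_init()/rwlock_init() rather than a static initializer.

#include <linux/spinlock.h>

struct my_state {
        spinlock_t      lock;           /* serialises the fields below */
        rwlock_t        table_lock;     /* stabilises table lookups */
};

static struct my_state my_state;

static void my_state_init(void)
{
        /* each lock must be initialized before its first use */
        spin_lock_init(&my_state.lock);
        rwlock_init(&my_state.table_lock);
}
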
index 606e8cd..df04a46 100644
@@ -111,7 +111,7 @@ typedef struct
         cfs_list_t             rad_ready_conns;/* connections ready to tx/rx */
         cfs_list_t             rad_new_conns; /* new connections to complete */
         cfs_waitq_t            rad_waitq;     /* scheduler waits here */
-        cfs_spinlock_t         rad_lock;      /* serialise */
+       spinlock_t              rad_lock;       /* serialise */
         void                  *rad_scheduler; /* scheduling thread */
         unsigned int           rad_nphysmap;  /* # phys mappings */
         unsigned int           rad_nppphysmap;/* # phys pages mapped */
@@ -129,7 +129,7 @@ typedef struct
         kra_device_t      kra_devices[RANAL_MAXDEVS]; /* device/ptag/cq */
         int               kra_ndevs;           /* # devices */
 
-        cfs_rwlock_t      kra_global_lock;     /* stabilize peer/conn ops */
+       rwlock_t          kra_global_lock;      /* stabilize peer/conn ops */
 
         cfs_list_t       *kra_peers;           /* hash table of all my known peers */
         int               kra_peer_hash_size;  /* size of kra_peers */
@@ -145,16 +145,16 @@ typedef struct
 
         long              kra_new_min_timeout; /* minimum timeout on any new conn */
         cfs_waitq_t       kra_reaper_waitq;    /* reaper sleeps here */
-        cfs_spinlock_t    kra_reaper_lock;     /* serialise */
+       spinlock_t        kra_reaper_lock;     /* serialise */
 
         cfs_list_t        kra_connd_peers;     /* peers waiting for a connection */
         cfs_list_t        kra_connd_acceptq;   /* accepted sockets to handshake */
         cfs_waitq_t       kra_connd_waitq;     /* connection daemons sleep here */
-        cfs_spinlock_t    kra_connd_lock;      /* serialise */
+       spinlock_t        kra_connd_lock;       /* serialise */
 
         cfs_list_t        kra_idle_txs;        /* idle tx descriptors */
         __u64             kra_next_tx_cookie;  /* RDMA completion cookie */
-        cfs_spinlock_t    kra_tx_lock;         /* serialise */
+       spinlock_t        kra_tx_lock;          /* serialise */
 } kra_data_t;
 
 #define RANAL_INIT_NOTHING         0
@@ -308,7 +308,7 @@ typedef struct kra_conn
         unsigned int        rac_close_recvd;   /* I've received CLOSE */
         unsigned int        rac_state;         /* connection state */
         unsigned int        rac_scheduled;     /* being attended to */
-        cfs_spinlock_t      rac_lock;          /* serialise */
+       spinlock_t          rac_lock;           /* serialise */
         kra_device_t       *rac_device;        /* which device */
         RAP_PVOID           rac_rihandle;      /* RA endpoint */
         kra_msg_t          *rac_rxmsg;         /* incoming message (FMA prefix) */
index 1b4cd35..36c0067 100644
@@ -53,14 +53,14 @@ kranal_device_callback(RAP_INT32 devid, RAP_PVOID arg)
                 if (dev->rad_id != devid)
                         continue;
 
-                cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+               spin_lock_irqsave(&dev->rad_lock, flags);
 
                 if (!dev->rad_ready) {
                         dev->rad_ready = 1;
                         cfs_waitq_signal(&dev->rad_waitq);
                 }
 
-                cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
+               spin_unlock_irqrestore(&dev->rad_lock, flags);
                 return;
         }
 
@@ -73,7 +73,7 @@ kranal_schedule_conn(kra_conn_t *conn)
         kra_device_t    *dev = conn->rac_device;
         unsigned long    flags;
 
-        cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+       spin_lock_irqsave(&dev->rad_lock, flags);
 
         if (!conn->rac_scheduled) {
                 kranal_conn_addref(conn);       /* +1 ref for scheduler */
@@ -82,7 +82,7 @@ kranal_schedule_conn(kra_conn_t *conn)
                 cfs_waitq_signal(&dev->rad_waitq);
         }
 
-        cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
+       spin_unlock_irqrestore(&dev->rad_lock, flags);
 }
 
 kra_tx_t *
@@ -91,10 +91,10 @@ kranal_get_idle_tx (void)
         unsigned long  flags;
         kra_tx_t      *tx;
 
-        cfs_spin_lock_irqsave(&kranal_data.kra_tx_lock, flags);
+       spin_lock_irqsave(&kranal_data.kra_tx_lock, flags);
 
         if (cfs_list_empty(&kranal_data.kra_idle_txs)) {
-                cfs_spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
+               spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
                 return NULL;
         }
 
@@ -105,7 +105,7 @@ kranal_get_idle_tx (void)
          * got a lock right now... */
         tx->tx_cookie = kranal_data.kra_next_tx_cookie++;
 
-        cfs_spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
+       spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
 
         LASSERT (tx->tx_buftype == RANAL_BUF_NONE);
         LASSERT (tx->tx_msg.ram_type == RANAL_MSG_NONE);
@@ -398,11 +398,11 @@ kranal_tx_done (kra_tx_t *tx, int completion)
         tx->tx_msg.ram_type = RANAL_MSG_NONE;
         tx->tx_conn = NULL;
 
-        cfs_spin_lock_irqsave(&kranal_data.kra_tx_lock, flags);
+       spin_lock_irqsave(&kranal_data.kra_tx_lock, flags);
 
         cfs_list_add_tail(&tx->tx_list, &kranal_data.kra_idle_txs);
 
-        cfs_spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
+       spin_unlock_irqrestore(&kranal_data.kra_tx_lock, flags);
 
         /* finalize AFTER freeing lnet msgs */
         for (i = 0; i < 2; i++) {
@@ -433,10 +433,10 @@ kranal_post_fma (kra_conn_t *conn, kra_tx_t *tx)
 
         tx->tx_conn = conn;
 
-        cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+       spin_lock_irqsave(&conn->rac_lock, flags);
         cfs_list_add_tail(&tx->tx_list, &conn->rac_fmaq);
         tx->tx_qtime = jiffies;
-        cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+       spin_unlock_irqrestore(&conn->rac_lock, flags);
 
         kranal_schedule_conn(conn);
 }
@@ -449,7 +449,7 @@ kranal_launch_tx (kra_tx_t *tx, lnet_nid_t nid)
         kra_conn_t      *conn;
         int              rc;
         int              retry;
-        cfs_rwlock_t    *g_lock = &kranal_data.kra_global_lock;
+       rwlock_t    *g_lock = &kranal_data.kra_global_lock;
 
         /* If I get here, I've committed to send, so I complete the tx with
          * failure on any problems */
@@ -458,27 +458,27 @@ kranal_launch_tx (kra_tx_t *tx, lnet_nid_t nid)
 
         for (retry = 0; ; retry = 1) {
 
-                cfs_read_lock(g_lock);
+               read_lock(g_lock);
 
                 peer = kranal_find_peer_locked(nid);
                 if (peer != NULL) {
                         conn = kranal_find_conn_locked(peer);
                         if (conn != NULL) {
                                 kranal_post_fma(conn, tx);
-                                cfs_read_unlock(g_lock);
+                               read_unlock(g_lock);
                                 return;
                         }
                 }
                 
                 /* Making connections; I'll need a write lock... */
-                cfs_read_unlock(g_lock);
-                cfs_write_lock_irqsave(g_lock, flags);
+               read_unlock(g_lock);
+               write_lock_irqsave(g_lock, flags);
 
                 peer = kranal_find_peer_locked(nid);
                 if (peer != NULL)
                         break;
                 
-                cfs_write_unlock_irqrestore(g_lock, flags);
+               write_unlock_irqrestore(g_lock, flags);
                 
                 if (retry) {
                         CERROR("Can't find peer %s\n", libcfs_nid2str(nid));
@@ -500,7 +500,7 @@ kranal_launch_tx (kra_tx_t *tx, lnet_nid_t nid)
         if (conn != NULL) {
                 /* Connection exists; queue message on it */
                 kranal_post_fma(conn, tx);
-                cfs_write_unlock_irqrestore(g_lock, flags);
+               write_unlock_irqrestore(g_lock, flags);
                 return;
         }
                         
@@ -511,7 +511,7 @@ kranal_launch_tx (kra_tx_t *tx, lnet_nid_t nid)
 
                 if (!(peer->rap_reconnect_interval == 0 || /* first attempt */
                       cfs_time_aftereq(jiffies, peer->rap_reconnect_time))) {
-                        cfs_write_unlock_irqrestore(g_lock, flags);
+                       write_unlock_irqrestore(g_lock, flags);
                         kranal_tx_done(tx, -EHOSTUNREACH);
                         return;
                 }
@@ -519,19 +519,19 @@ kranal_launch_tx (kra_tx_t *tx, lnet_nid_t nid)
                 peer->rap_connecting = 1;
                 kranal_peer_addref(peer); /* extra ref for connd */
 
-                cfs_spin_lock(&kranal_data.kra_connd_lock);
+               spin_lock(&kranal_data.kra_connd_lock);
 
                 cfs_list_add_tail(&peer->rap_connd_list,
                               &kranal_data.kra_connd_peers);
                 cfs_waitq_signal(&kranal_data.kra_connd_waitq);
 
-                cfs_spin_unlock(&kranal_data.kra_connd_lock);
+               spin_unlock(&kranal_data.kra_connd_lock);
         }
 
         /* A connection is being established; queue the message... */
         cfs_list_add_tail(&tx->tx_list, &peer->rap_tx_queue);
 
-        cfs_write_unlock_irqrestore(g_lock, flags);
+       write_unlock_irqrestore(g_lock, flags);
 }
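
kranal_launch_tx() above also preserves the lock-upgrade idiom: a Linux rwlock cannot be upgraded in place, so the code drops read_lock(), takes write_lock_irqsave(), and re-checks the state it saw, since another thread may have changed it in the window. A sketch of that shape, with hypothetical names:

#include <linux/spinlock.h>

struct my_obj;
extern struct my_obj *my_lookup(int key);       /* lock held (read or write) */
extern struct my_obj *my_insert(int key);       /* lock held for write */

static DEFINE_RWLOCK(my_table_lock);

struct my_obj *my_find_or_create(int key)
{
        struct my_obj *obj;
        unsigned long flags;

        read_lock(&my_table_lock);
        obj = my_lookup(key);
        read_unlock(&my_table_lock);
        if (obj != NULL)
                return obj;

        /* no upgrade primitive: retake for write and re-check */
        write_lock_irqsave(&my_table_lock, flags);
        obj = my_lookup(key);
        if (obj == NULL)
                obj = my_insert(key);
        write_unlock_irqrestore(&my_table_lock, flags);

        return obj;
}
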
 
 void
@@ -571,10 +571,10 @@ kranal_rdma(kra_tx_t *tx, int type,
         rrc = RapkPostRdma(conn->rac_rihandle, &tx->tx_rdma_desc);
         LASSERT (rrc == RAP_SUCCESS);
 
-        cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+       spin_lock_irqsave(&conn->rac_lock, flags);
         cfs_list_add_tail(&tx->tx_list, &conn->rac_rdmaq);
         tx->tx_qtime = jiffies;
-        cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+       spin_unlock_irqrestore(&conn->rac_lock, flags);
 }
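
The _irqsave variants keep their contract across the rename: flags must be an unsigned long in the caller's scope, and every unlock must pass back the word saved by its matching lock, because it restores the previous interrupt state rather than unconditionally re-enabling IRQs. A short sketch with hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_queue {
        spinlock_t       lock;  /* also taken from interrupt context */
        struct list_head q;
};

static void my_queue_init(struct my_queue *mq)
{
        spin_lock_init(&mq->lock);
        INIT_LIST_HEAD(&mq->q);
}

static void my_enqueue(struct my_queue *mq, struct list_head *item)
{
        unsigned long flags;    /* receives the saved IRQ state */

        spin_lock_irqsave(&mq->lock, flags);
        list_add_tail(item, &mq->q);
        spin_unlock_irqrestore(&mq->lock, flags);
}
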
 
 int
@@ -946,13 +946,13 @@ kranal_check_conn_timeouts (kra_conn_t *conn)
          * in case of hardware/software errors that make this conn seem
          * responsive even though it isn't progressing its message queues. */
 
-        cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+       spin_lock_irqsave(&conn->rac_lock, flags);
 
         cfs_list_for_each (ttmp, &conn->rac_fmaq) {
                 tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
 
                 if (cfs_time_aftereq(now, tx->tx_qtime + timeout)) {
-                        cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+                       spin_unlock_irqrestore(&conn->rac_lock, flags);
                         CERROR("tx on fmaq for %s blocked %lu seconds\n",
                                libcfs_nid2str(conn->rac_peer->rap_nid),
                                (now - tx->tx_qtime)/CFS_HZ);
@@ -964,7 +964,7 @@ kranal_check_conn_timeouts (kra_conn_t *conn)
                 tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
 
                 if (cfs_time_aftereq(now, tx->tx_qtime + timeout)) {
-                        cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+                       spin_unlock_irqrestore(&conn->rac_lock, flags);
                         CERROR("tx on rdmaq for %s blocked %lu seconds\n",
                                libcfs_nid2str(conn->rac_peer->rap_nid), 
                                (now - tx->tx_qtime)/CFS_HZ);
@@ -976,7 +976,7 @@ kranal_check_conn_timeouts (kra_conn_t *conn)
                 tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
 
                 if (cfs_time_aftereq(now, tx->tx_qtime + timeout)) {
-                        cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+                       spin_unlock_irqrestore(&conn->rac_lock, flags);
                         CERROR("tx on replyq for %s blocked %lu seconds\n",
                                libcfs_nid2str(conn->rac_peer->rap_nid),
                                (now - tx->tx_qtime)/CFS_HZ);
@@ -984,7 +984,7 @@ kranal_check_conn_timeouts (kra_conn_t *conn)
                 }
         }
 
-        cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+       spin_unlock_irqrestore(&conn->rac_lock, flags);
         return 0;
 }
 
@@ -1000,7 +1000,7 @@ kranal_reaper_check (int idx, unsigned long *min_timeoutp)
  again:
         /* NB. We expect to check all the conns and not find any problems, so
          * we just use a shared lock while we take a look... */
-        cfs_read_lock(&kranal_data.kra_global_lock);
+       read_lock(&kranal_data.kra_global_lock);
 
         cfs_list_for_each (ctmp, conns) {
                 conn = cfs_list_entry(ctmp, kra_conn_t, rac_hashlist);
@@ -1015,13 +1015,13 @@ kranal_reaper_check (int idx, unsigned long *min_timeoutp)
                         continue;
 
                 kranal_conn_addref(conn);
-                cfs_read_unlock(&kranal_data.kra_global_lock);
+               read_unlock(&kranal_data.kra_global_lock);
 
                 CERROR("Conn to %s, cqid %d timed out\n",
                        libcfs_nid2str(conn->rac_peer->rap_nid), 
                        conn->rac_cqid);
 
-                cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+               write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
                 switch (conn->rac_state) {
                 default:
@@ -1036,7 +1036,7 @@ kranal_reaper_check (int idx, unsigned long *min_timeoutp)
                         break;
                 }
 
-                cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+               write_unlock_irqrestore(&kranal_data.kra_global_lock,
                                             flags);
 
                 kranal_conn_decref(conn);
@@ -1045,7 +1045,7 @@ kranal_reaper_check (int idx, unsigned long *min_timeoutp)
                 goto again;
         }
 
-        cfs_read_unlock(&kranal_data.kra_global_lock);
+       read_unlock(&kranal_data.kra_global_lock);
 }
 
 int
@@ -1065,7 +1065,7 @@ kranal_connd (void *arg)
 
         cfs_waitlink_init(&wait);
 
-        cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+       spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
 
         while (!kranal_data.kra_shutdown) {
                 did_something = 0;
@@ -1075,7 +1075,7 @@ kranal_connd (void *arg)
                                              kra_acceptsock_t, ras_list);
                         cfs_list_del(&ras->ras_list);
 
-                        cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
+                       spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
                                                    flags);
 
                         CDEBUG(D_NET,"About to handshake someone\n");
@@ -1085,7 +1085,7 @@ kranal_connd (void *arg)
 
                         CDEBUG(D_NET,"Finished handshaking someone\n");
 
-                        cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock,
+                       spin_lock_irqsave(&kranal_data.kra_connd_lock,
                                               flags);
                         did_something = 1;
                 }
@@ -1095,13 +1095,13 @@ kranal_connd (void *arg)
                                               kra_peer_t, rap_connd_list);
 
                         cfs_list_del_init(&peer->rap_connd_list);
-                        cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
+                       spin_unlock_irqrestore(&kranal_data.kra_connd_lock,
                                                    flags);
 
                         kranal_connect(peer);
                         kranal_peer_decref(peer);
 
-                        cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock,
+                       spin_lock_irqsave(&kranal_data.kra_connd_lock,
                                               flags);
                         did_something = 1;
                 }
@@ -1112,17 +1112,17 @@ kranal_connd (void *arg)
                 cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
                 cfs_waitq_add_exclusive(&kranal_data.kra_connd_waitq, &wait);
 
-                cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+               spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
 
                 cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
 
                 cfs_set_current_state(CFS_TASK_RUNNING);
                 cfs_waitq_del(&kranal_data.kra_connd_waitq, &wait);
 
-                cfs_spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
+               spin_lock_irqsave(&kranal_data.kra_connd_lock, flags);
         }
 
-        cfs_spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
+       spin_unlock_irqrestore(&kranal_data.kra_connd_lock, flags);
 
         kranal_thread_fini();
         return 0;
@@ -1135,12 +1135,12 @@ kranal_update_reaper_timeout(long timeout)
 
         LASSERT (timeout > 0);
 
-        cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
+       spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
 
         if (timeout < kranal_data.kra_new_min_timeout)
                 kranal_data.kra_new_min_timeout = timeout;
 
-        cfs_spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
+       spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
 }
 
 int
@@ -1162,7 +1162,7 @@ kranal_reaper (void *arg)
 
         cfs_waitlink_init(&wait);
 
-        cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
+       spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
 
         while (!kranal_data.kra_shutdown) {
                 /* I wake up every 'p' seconds to check for timeouts on some
@@ -1181,13 +1181,13 @@ kranal_reaper (void *arg)
                         cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
                         cfs_waitq_add(&kranal_data.kra_reaper_waitq, &wait);
 
-                        cfs_spin_unlock_irqrestore(&kranal_data.kra_reaper_lock,
+                       spin_unlock_irqrestore(&kranal_data.kra_reaper_lock,
                                                    flags);
 
                         cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE,
                                             timeout);
 
-                        cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock,
+                       spin_lock_irqsave(&kranal_data.kra_reaper_lock,
                                               flags);
 
                         cfs_set_current_state(CFS_TASK_RUNNING);
@@ -1216,7 +1216,7 @@ kranal_reaper (void *arg)
                 }
                 min_timeout = current_min_timeout;
 
-                cfs_spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
+               spin_unlock_irqrestore(&kranal_data.kra_reaper_lock, flags);
 
                 LASSERT (min_timeout > 0);
 
@@ -1237,7 +1237,7 @@ kranal_reaper (void *arg)
 
                 next_check_time += p * CFS_HZ;
 
-                cfs_spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
+               spin_lock_irqsave(&kranal_data.kra_reaper_lock, flags);
 
                 if (((conn_index - chunk <= base_index &&
                       base_index < conn_index) ||
@@ -1284,13 +1284,13 @@ kranal_check_rdma_cq (kra_device_t *dev)
                 LASSERT (rrc == RAP_SUCCESS);
                 LASSERT ((event_type & RAPK_CQ_EVENT_OVERRUN) == 0);
 
-                cfs_read_lock(&kranal_data.kra_global_lock);
+               read_lock(&kranal_data.kra_global_lock);
 
                 conn = kranal_cqid2conn_locked(cqid);
                 if (conn == NULL) {
                         /* Conn was destroyed? */
                         CDEBUG(D_NET, "RDMA CQID lookup %d failed\n", cqid);
-                        cfs_read_unlock(&kranal_data.kra_global_lock);
+                       read_unlock(&kranal_data.kra_global_lock);
                         continue;
                 }
 
@@ -1300,7 +1300,7 @@ kranal_check_rdma_cq (kra_device_t *dev)
                 CDEBUG(D_NET, "Completed %p\n",
                        cfs_list_entry(conn->rac_rdmaq.next, kra_tx_t, tx_list));
 
-                cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+               spin_lock_irqsave(&conn->rac_lock, flags);
 
                 LASSERT (!cfs_list_empty(&conn->rac_rdmaq));
                 tx = cfs_list_entry(conn->rac_rdmaq.next, kra_tx_t, tx_list);
@@ -1313,13 +1313,13 @@ kranal_check_rdma_cq (kra_device_t *dev)
                 cfs_list_add_tail(&tx->tx_list, &conn->rac_fmaq);
                 tx->tx_qtime = jiffies;
 
-                cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+               spin_unlock_irqrestore(&conn->rac_lock, flags);
 
                 /* Get conn's fmaq processed, now I've just put something
                  * there */
                 kranal_schedule_conn(conn);
 
-                cfs_read_unlock(&kranal_data.kra_global_lock);
+               read_unlock(&kranal_data.kra_global_lock);
         }
 }
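
kranal_check_rdma_cq() nests the per-connection spinlock inside the global rwlock held for read, and the conversion preserves that ordering. A sketch of the same nesting under hypothetical names; the outer lock is always taken first, so the two can never deadlock against each other:

#include <linux/spinlock.h>

struct my_conn {
        spinlock_t lock;        /* inner: per-object state */
        int        pending;
};

extern struct my_conn *my_cqid2conn(int cqid);  /* table_lock held */

static DEFINE_RWLOCK(my_table_lock);            /* outer: object lifetime */

static void my_complete(int cqid)
{
        struct my_conn *conn;
        unsigned long flags;

        read_lock(&my_table_lock);              /* outer first, always */
        conn = my_cqid2conn(cqid);
        if (conn != NULL) {
                spin_lock_irqsave(&conn->lock, flags);
                conn->pending--;
                spin_unlock_irqrestore(&conn->lock, flags);
        }
        read_unlock(&my_table_lock);
}
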
 
@@ -1345,7 +1345,7 @@ kranal_check_fma_cq (kra_device_t *dev)
 
                 if ((event_type & RAPK_CQ_EVENT_OVERRUN) == 0) {
 
-                        cfs_read_lock(&kranal_data.kra_global_lock);
+                       read_lock(&kranal_data.kra_global_lock);
 
                         conn = kranal_cqid2conn_locked(cqid);
                         if (conn == NULL) {
@@ -1357,7 +1357,7 @@ kranal_check_fma_cq (kra_device_t *dev)
                                 kranal_schedule_conn(conn);
                         }
 
-                        cfs_read_unlock(&kranal_data.kra_global_lock);
+                       read_unlock(&kranal_data.kra_global_lock);
                         continue;
                 }
 
@@ -1367,7 +1367,7 @@ kranal_check_fma_cq (kra_device_t *dev)
 
                 for (i = 0; i < kranal_data.kra_conn_hash_size; i++) {
 
-                        cfs_read_lock(&kranal_data.kra_global_lock);
+                       read_lock(&kranal_data.kra_global_lock);
 
                         conns = &kranal_data.kra_conns[i];
 
@@ -1380,7 +1380,7 @@ kranal_check_fma_cq (kra_device_t *dev)
                         }
 
                         /* don't block write lockers for too long... */
-                        cfs_read_unlock(&kranal_data.kra_global_lock);
+                       read_unlock(&kranal_data.kra_global_lock);
                 }
         }
 }
@@ -1482,21 +1482,21 @@ kranal_process_fmaq (kra_conn_t *conn)
                 if (!conn->rac_close_recvd)
                         return;
 
-                cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+               write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
                 if (conn->rac_state == RANAL_CONN_CLOSING)
                         kranal_terminate_conn_locked(conn);
 
-                cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+               write_unlock_irqrestore(&kranal_data.kra_global_lock,
                                             flags);
                 return;
         }
 
-        cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+       spin_lock_irqsave(&conn->rac_lock, flags);
 
         if (cfs_list_empty(&conn->rac_fmaq)) {
 
-                cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+               spin_unlock_irqrestore(&conn->rac_lock, flags);
 
                 if (cfs_time_aftereq(jiffies,
                                      conn->rac_last_tx + conn->rac_keepalive *
@@ -1515,7 +1515,7 @@ kranal_process_fmaq (kra_conn_t *conn)
         cfs_list_del(&tx->tx_list);
         more_to_do = !cfs_list_empty(&conn->rac_fmaq);
 
-        cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+       spin_unlock_irqrestore(&conn->rac_lock, flags);
 
         expect_reply = 0;
         CDEBUG(D_NET, "sending regular msg: %p, type %02x, cookie "LPX64"\n",
@@ -1572,9 +1572,9 @@ kranal_process_fmaq (kra_conn_t *conn)
                 /* I need credits to send this.  Replace tx at the head of the
                  * fmaq and I'll get rescheduled when credits appear */
                 CDEBUG(D_NET, "EAGAIN on %p\n", conn);
-                cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+               spin_lock_irqsave(&conn->rac_lock, flags);
                 cfs_list_add(&tx->tx_list, &conn->rac_fmaq);
-                cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+               spin_unlock_irqrestore(&conn->rac_lock, flags);
                 return;
         }
 
@@ -1583,10 +1583,10 @@ kranal_process_fmaq (kra_conn_t *conn)
         } else {
                 /* LASSERT(current) above ensures this doesn't race with reply
                  * processing */
-                cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+               spin_lock_irqsave(&conn->rac_lock, flags);
                 cfs_list_add_tail(&tx->tx_list, &conn->rac_replyq);
                 tx->tx_qtime = jiffies;
-                cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+               spin_unlock_irqrestore(&conn->rac_lock, flags);
         }
 
         if (more_to_do) {
@@ -1613,7 +1613,7 @@ kranal_match_reply(kra_conn_t *conn, int type, __u64 cookie)
         kra_tx_t         *tx;
         unsigned long     flags;
 
-        cfs_spin_lock_irqsave(&conn->rac_lock, flags);
+       spin_lock_irqsave(&conn->rac_lock, flags);
 
         cfs_list_for_each(ttmp, &conn->rac_replyq) {
                 tx = cfs_list_entry(ttmp, kra_tx_t, tx_list);
@@ -1625,7 +1625,7 @@ kranal_match_reply(kra_conn_t *conn, int type, __u64 cookie)
                         continue;
 
                 if (tx->tx_msg.ram_type != type) {
-                        cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+                       spin_unlock_irqrestore(&conn->rac_lock, flags);
                         CWARN("Unexpected type %x (%x expected) "
                               "matched reply from %s\n",
                               tx->tx_msg.ram_type, type,
@@ -1634,11 +1634,11 @@ kranal_match_reply(kra_conn_t *conn, int type, __u64 cookie)
                 }
 
                 cfs_list_del(&tx->tx_list);
-                cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+               spin_unlock_irqrestore(&conn->rac_lock, flags);
                 return tx;
         }
 
-        cfs_spin_unlock_irqrestore(&conn->rac_lock, flags);
+       spin_unlock_irqrestore(&conn->rac_lock, flags);
         CWARN("Unmatched reply %02x/"LPX64" from %s\n",
               type, cookie, libcfs_nid2str(conn->rac_peer->rap_nid));
         return NULL;
@@ -1753,7 +1753,7 @@ kranal_check_fma_rx (kra_conn_t *conn)
         if (msg->ram_type == RANAL_MSG_CLOSE) {
                 CWARN("RX CLOSE from %s\n", libcfs_nid2str(conn->rac_peer->rap_nid));
                 conn->rac_close_recvd = 1;
-                cfs_write_lock_irqsave(&kranal_data.kra_global_lock, flags);
+               write_lock_irqsave(&kranal_data.kra_global_lock, flags);
 
                 if (conn->rac_state == RANAL_CONN_ESTABLISHED)
                         kranal_close_conn_locked(conn, 0);
@@ -1761,7 +1761,7 @@ kranal_check_fma_rx (kra_conn_t *conn)
                          conn->rac_close_sent)
                         kranal_terminate_conn_locked(conn);
 
-                cfs_write_unlock_irqrestore(&kranal_data.kra_global_lock,
+               write_unlock_irqrestore(&kranal_data.kra_global_lock,
                                             flags);
                 goto out;
         }
@@ -1950,18 +1950,18 @@ kranal_scheduler (void *arg)
         dev->rad_scheduler = current;
         cfs_waitlink_init(&wait);
 
-        cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+       spin_lock_irqsave(&dev->rad_lock, flags);
 
         while (!kranal_data.kra_shutdown) {
                 /* Safe: kra_shutdown only set when quiescent */
 
                 if (busy_loops++ >= RANAL_RESCHED) {
-                        cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
+                       spin_unlock_irqrestore(&dev->rad_lock, flags);
 
                         cfs_cond_resched();
                         busy_loops = 0;
 
-                        cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+                       spin_lock_irqsave(&dev->rad_lock, flags);
                 }
 
                 dropped_lock = 0;
@@ -1969,13 +1969,13 @@ kranal_scheduler (void *arg)
                 if (dev->rad_ready) {
                         /* Device callback fired since I last checked it */
                         dev->rad_ready = 0;
-                        cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
+                       spin_unlock_irqrestore(&dev->rad_lock, flags);
                         dropped_lock = 1;
 
                         kranal_check_rdma_cq(dev);
                         kranal_check_fma_cq(dev);
 
-                        cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+                       spin_lock_irqsave(&dev->rad_lock, flags);
                 }
 
                 cfs_list_for_each_safe(tmp, nxt, &dev->rad_ready_conns) {
@@ -1984,7 +1984,7 @@ kranal_scheduler (void *arg)
                         cfs_list_del_init(&conn->rac_schedlist);
                         LASSERT (conn->rac_scheduled);
                         conn->rac_scheduled = 0;
-                        cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
+                       spin_unlock_irqrestore(&dev->rad_lock, flags);
                         dropped_lock = 1;
 
                         kranal_check_fma_rx(conn);
@@ -1994,7 +1994,7 @@ kranal_scheduler (void *arg)
                                 kranal_complete_closed_conn(conn);
 
                         kranal_conn_decref(conn);
-                        cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+                       spin_lock_irqsave(&dev->rad_lock, flags);
                 }
 
                 nsoonest = 0;
@@ -2006,22 +2006,22 @@ kranal_scheduler (void *arg)
                         deadline = conn->rac_last_tx + conn->rac_keepalive;
                         if (cfs_time_aftereq(jiffies, deadline)) {
                                 /* Time to process this new conn */
-                                cfs_spin_unlock_irqrestore(&dev->rad_lock,
+                               spin_unlock_irqrestore(&dev->rad_lock,
                                                            flags);
                                 dropped_lock = 1;
 
                                 rc = kranal_process_new_conn(conn);
                                 if (rc != -EAGAIN) {
                                         /* All done with this conn */
-                                        cfs_spin_lock_irqsave(&dev->rad_lock,
+                                       spin_lock_irqsave(&dev->rad_lock,
                                                               flags);
                                         cfs_list_del_init(&conn->rac_schedlist);
-                                        cfs_spin_unlock_irqrestore(&dev-> \
+                                       spin_unlock_irqrestore(&dev-> \
                                                                    rad_lock,
                                                                    flags);
 
                                         kranal_conn_decref(conn);
-                                        cfs_spin_lock_irqsave(&dev->rad_lock,
+                                       spin_lock_irqsave(&dev->rad_lock,
                                                               flags);
                                         continue;
                                 }
@@ -2035,7 +2035,7 @@ kranal_scheduler (void *arg)
                                         conn->rac_keepalive += CFS_HZ;
                                 
                                 deadline = conn->rac_last_tx + conn->rac_keepalive;
-                                cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+                               spin_lock_irqsave(&dev->rad_lock, flags);
                         }
 
                         /* Does this conn need attention soonest? */
@@ -2049,7 +2049,7 @@ kranal_scheduler (void *arg)
 
                 cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
                 cfs_waitq_add_exclusive(&dev->rad_waitq, &wait);
-                cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
+               spin_unlock_irqrestore(&dev->rad_lock, flags);
 
                 if (nsoonest == 0) {
                         busy_loops = 0;
@@ -2066,10 +2066,10 @@ kranal_scheduler (void *arg)
 
                 cfs_waitq_del(&dev->rad_waitq, &wait);
                 cfs_set_current_state(CFS_TASK_RUNNING);
-                cfs_spin_lock_irqsave(&dev->rad_lock, flags);
+               spin_lock_irqsave(&dev->rad_lock, flags);
         }
 
-        cfs_spin_unlock_irqrestore(&dev->rad_lock, flags);
+       spin_unlock_irqrestore(&dev->rad_lock, flags);
 
         dev->rad_scheduler = NULL;
         kranal_thread_fini();
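
The scheduler loop above also keeps the other invariant the rename leaves untouched: a spinlock must be dropped before anything that can sleep or reschedule. A compressed sketch of that drop-work-retake shape, with hypothetical names:

#include <linux/sched.h>
#include <linux/spinlock.h>

extern int my_have_work(void);  /* call with my_lock held */
extern void my_do_work(void);   /* may sleep: call unlocked */

static DEFINE_SPINLOCK(my_lock);

static void my_loop(void)
{
        unsigned long flags;

        spin_lock_irqsave(&my_lock, flags);
        while (my_have_work()) {
                /* drop the lock before doing anything that may sleep */
                spin_unlock_irqrestore(&my_lock, flags);
                my_do_work();
                cond_resched();         /* never with a spinlock held */
                spin_lock_irqsave(&my_lock, flags);
        }
        spin_unlock_irqrestore(&my_lock, flags);
}
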
index c26de82..07388f0 100644
@@ -128,24 +128,24 @@ ksocknal_create_peer (ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
         CFS_INIT_LIST_HEAD (&peer->ksnp_routes);
         CFS_INIT_LIST_HEAD (&peer->ksnp_tx_queue);
         CFS_INIT_LIST_HEAD (&peer->ksnp_zc_req_list);
-        cfs_spin_lock_init(&peer->ksnp_lock);
+       spin_lock_init(&peer->ksnp_lock);
 
-        cfs_spin_lock_bh (&net->ksnn_lock);
+       spin_lock_bh(&net->ksnn_lock);
 
-        if (net->ksnn_shutdown) {
-                cfs_spin_unlock_bh (&net->ksnn_lock);
+       if (net->ksnn_shutdown) {
+               spin_unlock_bh(&net->ksnn_lock);
 
-                LIBCFS_FREE(peer, sizeof(*peer));
-                CERROR("Can't create peer: network shutdown\n");
-                return -ESHUTDOWN;
-        }
+               LIBCFS_FREE(peer, sizeof(*peer));
+               CERROR("Can't create peer: network shutdown\n");
+               return -ESHUTDOWN;
+       }
 
-        net->ksnn_npeers++;
+       net->ksnn_npeers++;
 
-        cfs_spin_unlock_bh (&net->ksnn_lock);
+       spin_unlock_bh(&net->ksnn_lock);
 
-        *peerp = peer;
-        return 0;
+       *peerp = peer;
+       return 0;
 }
 
 void
@@ -169,9 +169,9 @@ ksocknal_destroy_peer (ksock_peer_t *peer)
          * until they are destroyed, so we can be assured that _all_ state to
          * do with this peer has been cleaned up when its refcount drops to
          * zero. */
-        cfs_spin_lock_bh (&net->ksnn_lock);
-        net->ksnn_npeers--;
-        cfs_spin_unlock_bh (&net->ksnn_lock);
+       spin_lock_bh(&net->ksnn_lock);
+       net->ksnn_npeers--;
+       spin_unlock_bh(&net->ksnn_lock);
 }
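
The socklnd counters stay under the _bh lock variants because the socket callbacks run in softirq context: spin_lock_bh() disables bottom halves on the local CPU, so a callback cannot interrupt process context while it holds the lock. A sketch with hypothetical names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_net_lock);
static int my_npeers;

/* process context: block local BHs so the softirq path below
 * cannot run on this CPU while we hold the lock */
static void my_peer_created(void)
{
        spin_lock_bh(&my_net_lock);
        my_npeers++;
        spin_unlock_bh(&my_net_lock);
}

/* softirq context (e.g. a data-ready callback): a plain spin_lock()
 * suffices, since softirqs do not preempt each other on a CPU */
static void my_peer_destroyed(void)
{
        spin_lock(&my_net_lock);
        my_npeers--;
        spin_unlock(&my_net_lock);
}
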
 
 ksock_peer_t *
@@ -207,11 +207,11 @@ ksocknal_find_peer (lnet_ni_t *ni, lnet_process_id_t id)
 {
         ksock_peer_t     *peer;
 
-        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
-        peer = ksocknal_find_peer_locked (ni, id);
-        if (peer != NULL)                       /* +1 ref for caller? */
-                ksocknal_peer_addref(peer);
-        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+       read_lock(&ksocknal_data.ksnd_global_lock);
+       peer = ksocknal_find_peer_locked(ni, id);
+       if (peer != NULL)                       /* +1 ref for caller? */
+               ksocknal_peer_addref(peer);
+       read_unlock(&ksocknal_data.ksnd_global_lock);
 
         return (peer);
 }
@@ -259,7 +259,7 @@ ksocknal_get_peer_info (lnet_ni_t *ni, int index,
         int                j;
         int                rc = -ENOENT;
 
-        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+       read_lock(&ksocknal_data.ksnd_global_lock);
 
         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
 
@@ -317,7 +317,7 @@ ksocknal_get_peer_info (lnet_ni_t *ni, int index,
                 }
         }
  out:
-        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+       read_unlock(&ksocknal_data.ksnd_global_lock);
         return (rc);
 }
 
@@ -472,7 +472,7 @@ ksocknal_add_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
                 return (-ENOMEM);
         }
 
-        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+       write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
         /* always called with a ref on ni, so shutdown can't have started */
         LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
@@ -504,7 +504,7 @@ ksocknal_add_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
                 route2->ksnr_share_count++;
         }
 
-        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+       write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
         return (0);
 }
@@ -576,7 +576,7 @@ ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
         int                i;
         int                rc = -ENOENT;
 
-        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+       write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
         if (id.nid != LNET_NID_ANY)
                 lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
@@ -616,7 +616,7 @@ ksocknal_del_peer (lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
                 }
         }
 
-        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+       write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
         ksocknal_txlist_done(ni, &zombies, 1);
 
@@ -632,7 +632,7 @@ ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index)
         cfs_list_t        *ctmp;
         int                i;
 
-        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+       read_lock(&ksocknal_data.ksnd_global_lock);
 
         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                 cfs_list_for_each (ptmp, &ksocknal_data.ksnd_peers[i]) {
@@ -650,14 +650,14 @@ ksocknal_get_conn_by_idx (lnet_ni_t *ni, int index)
                                 conn = cfs_list_entry (ctmp, ksock_conn_t,
                                                        ksnc_list);
                                 ksocknal_conn_addref(conn);
-                                cfs_read_unlock (&ksocknal_data. \
+                               read_unlock(&ksocknal_data. \
                                                  ksnd_global_lock);
                                 return (conn);
                         }
                 }
         }
 
-        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+       read_unlock(&ksocknal_data.ksnd_global_lock);
         return (NULL);
 }
 
@@ -691,7 +691,7 @@ ksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs)
         int                i;
         int                nip;
 
-        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+       read_lock(&ksocknal_data.ksnd_global_lock);
 
         nip = net->ksnn_ninterfaces;
         LASSERT (nip <= LNET_MAX_INTERFACES);
@@ -699,7 +699,7 @@ ksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs)
         /* Only offer interfaces for additional connections if I have 
          * more than one. */
         if (nip < 2) {
-                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+               read_unlock(&ksocknal_data.ksnd_global_lock);
                 return 0;
         }
 
@@ -708,7 +708,7 @@ ksocknal_local_ipvec (lnet_ni_t *ni, __u32 *ipaddrs)
                 LASSERT (ipaddrs[i] != 0);
         }
 
-        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+       read_unlock(&ksocknal_data.ksnd_global_lock);
         return (nip);
 }
 
@@ -747,7 +747,7 @@ ksocknal_match_peerip (ksock_interface_t *iface, __u32 *ips, int nips)
 int
 ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
 {
-        cfs_rwlock_t       *global_lock = &ksocknal_data.ksnd_global_lock;
+       rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
         ksock_net_t        *net = peer->ksnp_ni->ni_data;
         ksock_interface_t  *iface;
         ksock_interface_t  *best_iface;
@@ -769,7 +769,7 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
         /* Also note that I'm not going to return more than n_peerips
          * interfaces, even if I have more myself */
 
-        cfs_write_lock_bh (global_lock);
+       write_lock_bh(global_lock);
 
         LASSERT (n_peerips <= LNET_MAX_INTERFACES);
         LASSERT (net->ksnn_ninterfaces <= LNET_MAX_INTERFACES);
@@ -845,7 +845,7 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
         /* Overwrite input peer IP addresses */
         memcpy(peerips, peer->ksnp_passive_ips, n_ips * sizeof(*peerips));
 
-        cfs_write_unlock_bh (global_lock);
+       write_unlock_bh(global_lock);
 
         return (n_ips);
 }
@@ -855,7 +855,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
                        __u32 *peer_ipaddrs, int npeer_ipaddrs)
 {
         ksock_route_t       *newroute = NULL;
-        cfs_rwlock_t        *global_lock = &ksocknal_data.ksnd_global_lock;
+       rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
         lnet_ni_t           *ni = peer->ksnp_ni;
         ksock_net_t         *net = ni->ni_data;
         cfs_list_t          *rtmp;
@@ -873,12 +873,12 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
          * expecting to be dealing with small numbers of interfaces, so the
          * O(n**3)-ness here shouldn't matter */
 
-        cfs_write_lock_bh (global_lock);
+       write_lock_bh(global_lock);
 
         if (net->ksnn_ninterfaces < 2) {
                 /* Only create additional connections 
                  * if I have > 1 interface */
-                cfs_write_unlock_bh (global_lock);
+               write_unlock_bh(global_lock);
                 return;
         }
 
@@ -888,13 +888,13 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
                 if (newroute != NULL) {
                         newroute->ksnr_ipaddr = peer_ipaddrs[i];
                 } else {
-                        cfs_write_unlock_bh (global_lock);
+                       write_unlock_bh(global_lock);
 
                         newroute = ksocknal_create_route(peer_ipaddrs[i], port);
                         if (newroute == NULL)
                                 return;
 
-                        cfs_write_lock_bh (global_lock);
+                       write_lock_bh(global_lock);
                 }
 
                 if (peer->ksnp_closing) {
@@ -963,7 +963,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
                 newroute = NULL;
         }
 
-        cfs_write_unlock_bh (global_lock);
+       write_unlock_bh(global_lock);
         if (newroute != NULL)
                 ksocknal_route_decref(newroute);
 }
@@ -991,12 +991,12 @@ ksocknal_accept (lnet_ni_t *ni, cfs_socket_t *sock)
         cr->ksncr_ni   = ni;
         cr->ksncr_sock = sock;
 
-        cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+       spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
 
         cfs_list_add_tail(&cr->ksncr_list, &ksocknal_data.ksnd_connd_connreqs);
         cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
 
-        cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+       spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
         return 0;
 }
 
@@ -1018,7 +1018,7 @@ int
 ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
                       cfs_socket_t *sock, int type)
 {
-        cfs_rwlock_t      *global_lock = &ksocknal_data.ksnd_global_lock;
+       rwlock_t                *global_lock = &ksocknal_data.ksnd_global_lock;
         CFS_LIST_HEAD     (zombies);
         lnet_process_id_t  peerid;
         cfs_list_t        *tmp;
@@ -1092,9 +1092,9 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
                 hello->kshm_nips = ksocknal_local_ipvec(ni, hello->kshm_ips);
                 peerid = peer->ksnp_id;
 
-                cfs_write_lock_bh(global_lock);
+               write_lock_bh(global_lock);
                 conn->ksnc_proto = peer->ksnp_proto;
-                cfs_write_unlock_bh(global_lock);
+               write_unlock_bh(global_lock);
 
                 if (conn->ksnc_proto == NULL) {
                          conn->ksnc_proto = &ksocknal_protocol_v3x;
@@ -1129,13 +1129,13 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
 
         if (active) {
                 ksocknal_peer_addref(peer);
-                cfs_write_lock_bh (global_lock);
+               write_lock_bh(global_lock);
         } else {
                 rc = ksocknal_create_peer(&peer, ni, peerid);
                 if (rc != 0)
                         goto failed_1;
 
-                cfs_write_lock_bh (global_lock);
+               write_lock_bh(global_lock);
 
                 /* called with a ref on ni, so shutdown can't have started */
                 LASSERT (((ksock_net_t *) ni->ni_data)->ksnn_shutdown == 0);
@@ -1288,7 +1288,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
                 ksocknal_queue_tx_locked (tx, conn);
         }
 
-        cfs_write_unlock_bh (global_lock);
+       write_unlock_bh(global_lock);
 
         /* We've now got a new connection.  Any errors from here on are just
          * like "normal" comms errors and we close the connection normally.
@@ -1326,7 +1326,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
         if (rc == 0)
                 rc = ksocknal_lib_setup_sock(sock);
 
-        cfs_write_lock_bh(global_lock);
+       write_lock_bh(global_lock);
 
         /* NB my callbacks block while I hold ksnd_global_lock */
         ksocknal_lib_set_callback(sock, conn);
@@ -1334,15 +1334,15 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
         if (!active)
                 peer->ksnp_accepting--;
 
-        cfs_write_unlock_bh(global_lock);
+       write_unlock_bh(global_lock);
 
         if (rc != 0) {
-                cfs_write_lock_bh(global_lock);
+               write_lock_bh(global_lock);
                 if (!conn->ksnc_closing) {
                         /* could be closed by another thread */
                         ksocknal_close_conn_locked(conn, rc);
                 }
-                cfs_write_unlock_bh(global_lock);
+               write_unlock_bh(global_lock);
         } else if (ksocknal_connsock_addref(conn) == 0) {
                 /* Allow I/O to proceed. */
                 ksocknal_read_callback(conn);
@@ -1363,7 +1363,7 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
                 ksocknal_unlink_peer_locked(peer);
         }
 
-        cfs_write_unlock_bh (global_lock);
+       write_unlock_bh(global_lock);
 
         if (warn != NULL) {
                 if (rc < 0)
@@ -1383,9 +1383,9 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
                         ksocknal_send_hello(ni, conn, peerid.nid, hello);
                 }
 
-                cfs_write_lock_bh(global_lock);
+               write_lock_bh(global_lock);
                 peer->ksnp_accepting--;
-                cfs_write_unlock_bh(global_lock);
+               write_unlock_bh(global_lock);
         }
 
         ksocknal_txlist_done(ni, &zombies, 1);
@@ -1464,10 +1464,10 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
                                                 tx_list)
                                 ksocknal_tx_prep(conn, tx);
 
-                        cfs_spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
-                        cfs_list_splice_init(&peer->ksnp_tx_queue,
-                                             &conn->ksnc_tx_queue);
-                        cfs_spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
+                       spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
+                       cfs_list_splice_init(&peer->ksnp_tx_queue,
+                                            &conn->ksnc_tx_queue);
+                       spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
                 }
 
                 peer->ksnp_proto = NULL;        /* renegotiate protocol version */
@@ -1480,13 +1480,13 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
                 }
         }
 
-        cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+       spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
-        cfs_list_add_tail (&conn->ksnc_list,
-                           &ksocknal_data.ksnd_deathrow_conns);
-        cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
+       cfs_list_add_tail(&conn->ksnc_list,
+                         &ksocknal_data.ksnd_deathrow_conns);
+       cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
 
-        cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+       spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 }
 
 void
@@ -1499,7 +1499,7 @@ ksocknal_peer_failed (ksock_peer_t *peer)
          * tell LNET I think the peer is dead if it's to another kernel and
          * there are no connections or connection attempts in existence. */
 
-        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+       read_lock(&ksocknal_data.ksnd_global_lock);
 
         if ((peer->ksnp_id.pid & LNET_PID_USERFLAG) == 0 &&
             cfs_list_empty(&peer->ksnp_conns) &&
@@ -1509,7 +1509,7 @@ ksocknal_peer_failed (ksock_peer_t *peer)
                 last_alive = peer->ksnp_last_alive;
         }
 
-        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+       read_unlock(&ksocknal_data.ksnd_global_lock);
 
         if (notify)
                 lnet_notify (peer->ksnp_ni, peer->ksnp_id.nid, 0,
@@ -1528,7 +1528,7 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn)
          * abort all buffered data */
         LASSERT (conn->ksnc_sock == NULL);
 
-        cfs_spin_lock(&peer->ksnp_lock);
+       spin_lock(&peer->ksnp_lock);
 
         cfs_list_for_each_entry_safe_typed(tx, tmp, &peer->ksnp_zc_req_list,
                                            ksock_tx_t, tx_zc_list) {
@@ -1543,7 +1543,7 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn)
                 cfs_list_add(&tx->tx_zc_list, &zlist);
         }
 
-        cfs_spin_unlock(&peer->ksnp_lock);
+       spin_unlock(&peer->ksnp_lock);
 
         while (!cfs_list_empty(&zlist)) {
                 tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_zc_list);
@@ -1567,7 +1567,7 @@ ksocknal_terminate_conn (ksock_conn_t *conn)
         LASSERT(conn->ksnc_closing);
 
         /* wake up the scheduler to "send" all remaining packets to /dev/null */
-        cfs_spin_lock_bh (&sched->kss_lock);
+       spin_lock_bh(&sched->kss_lock);
 
         /* a closing conn is always ready to tx */
         conn->ksnc_tx_ready = 1;
@@ -1583,10 +1583,10 @@ ksocknal_terminate_conn (ksock_conn_t *conn)
                 cfs_waitq_signal (&sched->kss_waitq);
         }
 
-        cfs_spin_unlock_bh (&sched->kss_lock);
+       spin_unlock_bh(&sched->kss_lock);
 
-        /* serialise with callbacks */
-        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+       /* serialise with callbacks */
+       write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
         ksocknal_lib_reset_callback(conn->ksnc_sock, conn);
 
@@ -1601,7 +1601,7 @@ ksocknal_terminate_conn (ksock_conn_t *conn)
                 peer->ksnp_error = 0;     /* avoid multiple notifications */
         }
 
-        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+       write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
         if (failed)
                 ksocknal_peer_failed(peer);
@@ -1617,15 +1617,15 @@ ksocknal_terminate_conn (ksock_conn_t *conn)
 void
 ksocknal_queue_zombie_conn (ksock_conn_t *conn)
 {
-        /* Queue the conn for the reaper to destroy */
+       /* Queue the conn for the reaper to destroy */
 
-        LASSERT (cfs_atomic_read(&conn->ksnc_conn_refcount) == 0);
-        cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+       LASSERT(cfs_atomic_read(&conn->ksnc_conn_refcount) == 0);
+       spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
-        cfs_list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
-        cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
+       cfs_list_add_tail(&conn->ksnc_list, &ksocknal_data.ksnd_zombie_conns);
+       cfs_waitq_signal(&ksocknal_data.ksnd_reaper_waitq);
 
-        cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+       spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 }
 
 void
@@ -1721,11 +1721,11 @@ ksocknal_close_conn_and_siblings (ksock_conn_t *conn, int why)
         __u32             ipaddr = conn->ksnc_ipaddr;
         int               count;
 
-        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+       write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
         count = ksocknal_close_peer_conns_locked (peer, ipaddr, why);
 
-        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+       write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
         return (count);
 }
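
On the writer side, write_lock_bh()/write_unlock_bh(), as used on ksnd_global_lock above, give exclusive access while also keeping softirq readers off the local CPU. A final sketch under hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_RWLOCK(my_global_lock);
static LIST_HEAD(my_conns);

extern int my_close_locked(struct list_head *conns);    /* write lock held */

static int my_close_all(void)
{
        int count;

        write_lock_bh(&my_global_lock);  /* exclusive, local BHs disabled */
        count = my_close_locked(&my_conns);
        write_unlock_bh(&my_global_lock);

        return count;
}
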
@@ -1741,7 +1741,7 @@ ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr)
         int                 i;
         int                 count = 0;
 
-        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+       write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
         if (id.nid != LNET_NID_ANY)
                 lo = hi = (int)(ksocknal_nid2peerlist(id.nid) - ksocknal_data.ksnd_peers);
@@ -1764,7 +1764,7 @@ ksocknal_close_matching_conns (lnet_process_id_t id, __u32 ipaddr)
                 }
         }
 
-        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+       write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
         /* wildcards always succeed */
         if (id.nid == LNET_NID_ANY || id.pid == LNET_PID_ANY || ipaddr == 0)
@@ -1803,10 +1803,10 @@ ksocknal_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
         cfs_time_t         last_alive = 0;
         cfs_time_t         now = cfs_time_current();
         ksock_peer_t      *peer = NULL;
-        cfs_rwlock_t      *glock = &ksocknal_data.ksnd_global_lock;
+       rwlock_t                *glock = &ksocknal_data.ksnd_global_lock;
         lnet_process_id_t  id = {.nid = nid, .pid = LUSTRE_SRV_LNET_PID};
 
-        cfs_read_lock(glock);
+       read_lock(glock);
 
         peer = ksocknal_find_peer_locked(ni, id);
         if (peer != NULL) {
@@ -1832,7 +1832,7 @@ ksocknal_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
                         connect = 0;
         }
 
-        cfs_read_unlock(glock);
+       read_unlock(glock);
 
         if (last_alive != 0)
                 *when = last_alive;
@@ -1847,13 +1847,13 @@ ksocknal_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
 
         ksocknal_add_peer(ni, id, LNET_NIDADDR(nid), lnet_acceptor_port());
 
-        cfs_write_lock_bh(glock);
+       write_lock_bh(glock);
 
         peer = ksocknal_find_peer_locked(ni, id);
         if (peer != NULL)
                 ksocknal_launch_all_connections_locked(peer);
 
-        cfs_write_unlock_bh(glock);
+       write_unlock_bh(glock);
         return;
 }
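
ksocknal_query() above takes ksnd_global_lock in read mode for the cheap peer lookup and only retakes it in write mode when connections must be launched; because rwlocks cannot be upgraded in place, the peer has to be looked up a second time after write_lock_bh(). The same idiom reappears in ksocknal_launch_packet() further down. A compact sketch, where table_lookup()/table_insert() and struct obj are hypothetical stand-ins:

        static DEFINE_RWLOCK(glock);

        static struct obj *find_or_create(int key)
        {
                struct obj *obj;

                read_lock(&glock);
                obj = table_lookup(key);        /* fast path, shared lock */
                read_unlock(&glock);
                if (obj != NULL)
                        return obj;

                write_lock_bh(&glock);
                /* re-check: another thread may have inserted the entry
                 * between read_unlock() and write_lock_bh() */
                obj = table_lookup(key);
                if (obj == NULL)
                        obj = table_insert(key);
                write_unlock_bh(&glock);
                return obj;
        }
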
 
@@ -1866,7 +1866,7 @@ ksocknal_push_peer (ksock_peer_t *peer)
         ksock_conn_t     *conn;
 
         for (index = 0; ; index++) {
-                cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+               read_lock(&ksocknal_data.ksnd_global_lock);
 
                 i = 0;
                 conn = NULL;
@@ -1880,7 +1880,7 @@ ksocknal_push_peer (ksock_peer_t *peer)
                         }
                 }
 
-                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+               read_unlock(&ksocknal_data.ksnd_global_lock);
 
                 if (conn == NULL)
                         break;
@@ -1902,7 +1902,7 @@ ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id)
 
         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                 for (j = 0; ; j++) {
-                        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+                       read_lock(&ksocknal_data.ksnd_global_lock);
 
                         index = 0;
                         peer = NULL;
@@ -1925,7 +1925,7 @@ ksocknal_push (lnet_ni_t *ni, lnet_process_id_t id)
                                 }
                         }
 
-                        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+                       read_unlock(&ksocknal_data.ksnd_global_lock);
 
                         if (peer != NULL) {
                                 rc = 0;
@@ -1956,7 +1956,7 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
             netmask == 0)
                 return (-EINVAL);
 
-        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+       write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
         iface = ksocknal_ip2iface(ni, ipaddress);
         if (iface != NULL) {
@@ -1996,7 +1996,7 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
                 /* NB only new connections will pay attention to the new interface! */
         }
 
-        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+       write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
         return (rc);
 }
@@ -2054,7 +2054,7 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
         int                i;
         int                j;
 
-        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+       write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
         for (i = 0; i < net->ksnn_ninterfaces; i++) {
                 this_ip = net->ksnn_interfaces[i].ksni_ipaddr;
@@ -2085,7 +2085,7 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
                 }
         }
 
-        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+       write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
         return (rc);
 }
@@ -2102,7 +2102,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
                 ksock_net_t       *net = ni->ni_data;
                 ksock_interface_t *iface;
 
-                cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+               read_lock(&ksocknal_data.ksnd_global_lock);
 
                 if (data->ioc_count >= (__u32)net->ksnn_ninterfaces) {
                         rc = -ENOENT;
@@ -2116,7 +2116,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
                         data->ioc_u32[3] = iface->ksni_nroutes;
                 }
 
-                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+               read_unlock(&ksocknal_data.ksnd_global_lock);
                 return rc;
         }
 
@@ -2240,24 +2240,24 @@ ksocknal_free_buffers (void)
                      sizeof (cfs_list_t) *
                      ksocknal_data.ksnd_peer_hash_size);
 
-        cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
+       spin_lock(&ksocknal_data.ksnd_tx_lock);
 
-        if (!cfs_list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
-                cfs_list_t        zlist;
-                ksock_tx_t       *tx;
+       if (!cfs_list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
+               cfs_list_t      zlist;
+               ksock_tx_t      *tx;
 
-                cfs_list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
-                cfs_list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
-                cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
+               cfs_list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
+               cfs_list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
+               spin_unlock(&ksocknal_data.ksnd_tx_lock);
 
-                while(!cfs_list_empty(&zlist)) {
-                        tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_list);
-                        cfs_list_del(&tx->tx_list);
-                        LIBCFS_FREE(tx, tx->tx_desc_size);
-                }
-        } else {
-                cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
-        }
+               while (!cfs_list_empty(&zlist)) {
+                       tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_list);
+                       cfs_list_del(&tx->tx_list);
+                       LIBCFS_FREE(tx, tx->tx_desc_size);
+               }
+       } else {
+               spin_unlock(&ksocknal_data.ksnd_tx_lock);
+       }
 }
 
 void
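
ksocknal_free_buffers() above steals the whole idle-noop-tx list while holding ksnd_tx_lock and frees the entries only after dropping it, so LIBCFS_FREE() never runs under the spinlock. Against the plain kernel list API the same splice idiom would look roughly like this (tx_lock, idle_txs and struct tx are illustrative stand-ins):

        #include <linux/list.h>
        #include <linux/slab.h>
        #include <linux/spinlock.h>

        struct tx {
                struct list_head tx_list;
                /* ... descriptor payload ... */
        };

        static LIST_HEAD(idle_txs);
        static DEFINE_SPINLOCK(tx_lock);

        static void drain_idle_txs(void)
        {
                LIST_HEAD(zlist);

                spin_lock(&tx_lock);
                list_splice_init(&idle_txs, &zlist);    /* steal the list */
                spin_unlock(&tx_lock);

                while (!list_empty(&zlist)) {           /* free unlocked */
                        struct tx *tx = list_first_entry(&zlist, struct tx,
                                                         tx_list);

                        list_del(&tx->tx_list);
                        kfree(tx);
                }
        }
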
@@ -2328,17 +2328,17 @@ ksocknal_base_shutdown(void)
                }
 
                 i = 4;
-                cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+               read_lock(&ksocknal_data.ksnd_global_lock);
                 while (ksocknal_data.ksnd_nthreads != 0) {
                         i++;
                         CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                                "waiting for %d threads to terminate\n",
                                 ksocknal_data.ksnd_nthreads);
-                        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+                       read_unlock(&ksocknal_data.ksnd_global_lock);
                         cfs_pause(cfs_time_seconds(1));
-                        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+                       read_lock(&ksocknal_data.ksnd_global_lock);
                 }
-                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+               read_unlock(&ksocknal_data.ksnd_global_lock);
 
                 ksocknal_free_buffers();
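
The `(i & (-i)) == i` expression feeding CDEBUG in the wait loop above is a log-throttling trick: in two's complement, i & -i isolates the lowest set bit, so the test holds exactly when i is a power of two, and the "waiting for %d threads" message is promoted to D_WARNING only on power-of-two iteration counts while every other pass logs at the quiet D_NET level. A user-space check of the expression:

        #include <stdio.h>

        int main(void)
        {
                for (int i = 1; i <= 32; i++)
                        if ((i & (-i)) == i)
                                printf("noisy at iteration %d\n", i);
                return 0;       /* prints 1 2 4 8 16 32 */
        }
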
 
@@ -2389,21 +2389,21 @@ ksocknal_base_startup(void)
         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++)
                 CFS_INIT_LIST_HEAD(&ksocknal_data.ksnd_peers[i]);
 
-        cfs_rwlock_init(&ksocknal_data.ksnd_global_lock);
+       rwlock_init(&ksocknal_data.ksnd_global_lock);
        CFS_INIT_LIST_HEAD(&ksocknal_data.ksnd_nets);
 
-        cfs_spin_lock_init (&ksocknal_data.ksnd_reaper_lock);
+       spin_lock_init(&ksocknal_data.ksnd_reaper_lock);
         CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_enomem_conns);
         CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_zombie_conns);
         CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_deathrow_conns);
         cfs_waitq_init(&ksocknal_data.ksnd_reaper_waitq);
 
-        cfs_spin_lock_init (&ksocknal_data.ksnd_connd_lock);
+       spin_lock_init(&ksocknal_data.ksnd_connd_lock);
         CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_connreqs);
         CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_connd_routes);
         cfs_waitq_init(&ksocknal_data.ksnd_connd_waitq);
 
-        cfs_spin_lock_init (&ksocknal_data.ksnd_tx_lock);
+       spin_lock_init(&ksocknal_data.ksnd_tx_lock);
         CFS_INIT_LIST_HEAD (&ksocknal_data.ksnd_idle_noop_txs);
 
        /* NB memset above zeros whole of ksocknal_data */
@@ -2442,7 +2442,7 @@ ksocknal_base_startup(void)
                        sched = &info->ksi_scheds[nthrs - 1];
 
                        sched->kss_info = info;
-                       cfs_spin_lock_init(&sched->kss_lock);
+                       spin_lock_init(&sched->kss_lock);
                        CFS_INIT_LIST_HEAD(&sched->kss_rx_conns);
                        CFS_INIT_LIST_HEAD(&sched->kss_tx_conns);
                        CFS_INIT_LIST_HEAD(&sched->kss_zombie_noop_txs);
@@ -2465,16 +2465,16 @@ ksocknal_base_startup(void)
         }
 
         for (i = 0; i < *ksocknal_tunables.ksnd_nconnds; i++) {
-                cfs_spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
-                ksocknal_data.ksnd_connd_starting++;
-                cfs_spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
-
-                rc = ksocknal_thread_start (ksocknal_connd,
-                                            (void *)((ulong_ptr_t)i));
-                if (rc != 0) {
-                        cfs_spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
-                        ksocknal_data.ksnd_connd_starting--;
-                        cfs_spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
+               spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
+               ksocknal_data.ksnd_connd_starting++;
+               spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
+
+               rc = ksocknal_thread_start(ksocknal_connd,
+                                          (void *)((ulong_ptr_t)i));
+               if (rc != 0) {
+                       spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
+                       ksocknal_data.ksnd_connd_starting--;
+                       spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
                         CERROR("Can't spawn socknal connd: %d\n", rc);
                         goto failed;
                 }
@@ -2499,11 +2499,11 @@ ksocknal_base_startup(void)
 void
 ksocknal_debug_peerhash (lnet_ni_t *ni)
 {
-        ksock_peer_t     *peer = NULL;
-        cfs_list_t       *tmp;
-        int               i;
+       ksock_peer_t    *peer = NULL;
+       cfs_list_t      *tmp;
+       int             i;
 
-        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+       read_lock(&ksocknal_data.ksnd_global_lock);
 
         for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                 cfs_list_for_each (tmp, &ksocknal_data.ksnd_peers[i]) {
@@ -2546,7 +2546,7 @@ ksocknal_debug_peerhash (lnet_ni_t *ni)
                 }
         }
 
-        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+       read_unlock(&ksocknal_data.ksnd_global_lock);
         return;
 }
 
@@ -2563,30 +2563,30 @@ ksocknal_shutdown (lnet_ni_t *ni)
         LASSERT(ksocknal_data.ksnd_init == SOCKNAL_INIT_ALL);
         LASSERT(ksocknal_data.ksnd_nnets > 0);
 
-        cfs_spin_lock_bh (&net->ksnn_lock);
-        net->ksnn_shutdown = 1;                 /* prevent new peers */
-        cfs_spin_unlock_bh (&net->ksnn_lock);
+       spin_lock_bh(&net->ksnn_lock);
+       net->ksnn_shutdown = 1;                 /* prevent new peers */
+       spin_unlock_bh(&net->ksnn_lock);
 
-        /* Delete all peers */
-        ksocknal_del_peer(ni, anyid, 0);
+       /* Delete all peers */
+       ksocknal_del_peer(ni, anyid, 0);
 
-        /* Wait for all peer state to clean up */
-        i = 2;
-        cfs_spin_lock_bh (&net->ksnn_lock);
-        while (net->ksnn_npeers != 0) {
-                cfs_spin_unlock_bh (&net->ksnn_lock);
+       /* Wait for all peer state to clean up */
+       i = 2;
+       spin_lock_bh(&net->ksnn_lock);
+       while (net->ksnn_npeers != 0) {
+               spin_unlock_bh(&net->ksnn_lock);
 
-                i++;
-                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
-                       "waiting for %d peers to disconnect\n",
-                       net->ksnn_npeers);
-                cfs_pause(cfs_time_seconds(1));
+               i++;
+               CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
+                      "waiting for %d peers to disconnect\n",
+                      net->ksnn_npeers);
+               cfs_pause(cfs_time_seconds(1));
 
-                ksocknal_debug_peerhash(ni);
+               ksocknal_debug_peerhash(ni);
 
-                cfs_spin_lock_bh (&net->ksnn_lock);
-        }
-        cfs_spin_unlock_bh (&net->ksnn_lock);
+               spin_lock_bh(&net->ksnn_lock);
+       }
+       spin_unlock_bh(&net->ksnn_lock);
 
         for (i = 0; i < net->ksnn_ninterfaces; i++) {
                 LASSERT (net->ksnn_interfaces[i].ksni_npeers == 0);
@@ -2785,7 +2785,7 @@ ksocknal_startup (lnet_ni_t *ni)
         if (net == NULL)
                 goto fail_0;
 
-        cfs_spin_lock_init(&net->ksnn_lock);
+       spin_lock_init(&net->ksnn_lock);
         net->ksnn_incarnation = ksocknal_new_incarnation();
         ni->ni_data = net;
         ni->ni_peertimeout    = *ksocknal_tunables.ksnd_peertimeout;
index 9384f03..06c2484 100644
@@ -66,7 +66,7 @@ struct ksock_sched_info;
 
 typedef struct                                  /* per scheduler state */
 {
-       cfs_spinlock_t          kss_lock;       /* serialise */
+       spinlock_t              kss_lock;       /* serialise */
        cfs_list_t              kss_rx_conns;   /* conn waiting to be read */
        /* conn waiting to be written */
        cfs_list_t              kss_tx_conns;
@@ -153,13 +153,13 @@ typedef struct
 
 typedef struct
 {
-        __u64             ksnn_incarnation;     /* my epoch */
-        cfs_spinlock_t    ksnn_lock;            /* serialise */
+       __u64             ksnn_incarnation;     /* my epoch */
+       spinlock_t        ksnn_lock;            /* serialise */
        cfs_list_t        ksnn_list;            /* chain on global list */
-        int               ksnn_npeers;          /* # peers */
-        int               ksnn_shutdown;        /* shutting down? */
-        int               ksnn_ninterfaces;     /* IP interfaces */
-        ksock_interface_t ksnn_interfaces[LNET_MAX_INTERFACES];
+       int               ksnn_npeers;          /* # peers */
+       int               ksnn_shutdown;        /* shutting down? */
+       int               ksnn_ninterfaces;     /* IP interfaces */
+       ksock_interface_t ksnn_interfaces[LNET_MAX_INTERFACES];
 } ksock_net_t;
 
 /** connd timeout */
@@ -173,7 +173,7 @@ typedef struct
        int                     ksnd_nnets;     /* # networks set up */
        cfs_list_t              ksnd_nets;      /* list of nets */
        /* stabilize peer/conn ops */
-       cfs_rwlock_t            ksnd_global_lock;
+       rwlock_t                ksnd_global_lock;
        /* hash table of all my known peers */
        cfs_list_t              *ksnd_peers;
        int                     ksnd_peer_hash_size; /* size of ksnd_peers */
@@ -190,7 +190,7 @@ typedef struct
         cfs_list_t        ksnd_enomem_conns;   /* conns to retry: reaper_lock*/
         cfs_waitq_t       ksnd_reaper_waitq;   /* reaper sleeps here */
         cfs_time_t        ksnd_reaper_waketime;/* when reaper will wake */
-        cfs_spinlock_t    ksnd_reaper_lock;    /* serialise */
+       spinlock_t        ksnd_reaper_lock;     /* serialise */
 
         int               ksnd_enomem_tx;      /* test ENOMEM sender */
         int               ksnd_stall_tx;       /* test sluggish sender */
@@ -208,10 +208,10 @@ typedef struct
         long              ksnd_connd_starting_stamp;
         /** # running connd */
         unsigned          ksnd_connd_running;
-        cfs_spinlock_t    ksnd_connd_lock;     /* serialise */
+       spinlock_t        ksnd_connd_lock;      /* serialise */
 
-        cfs_list_t        ksnd_idle_noop_txs;  /* list head for freed noop tx */
-        cfs_spinlock_t    ksnd_tx_lock;        /* serialise, NOT safe in g_lock */
+       cfs_list_t        ksnd_idle_noop_txs;   /* list head for freed noop tx */
+       spinlock_t        ksnd_tx_lock;         /* serialise, g_lock unsafe */
 
 } ksock_nal_data_t;
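
With the wrapper typedefs gone, the spinlock_t and rwlock_t members above are plain kernel locks embedded in ksock_nal_data_t. Locks living inside a zeroed or dynamically allocated structure cannot use the static DEFINE_SPINLOCK()/DEFINE_RWLOCK() initialisers, which is why ksocknal_base_startup() earlier in this patch calls rwlock_init() and spin_lock_init() explicitly. The general shape, reduced to two fields:

        struct nal_data {
                rwlock_t        global_lock;    /* stabilise peer/conn ops */
                spinlock_t      reaper_lock;    /* protects reaper queues */
        };

        static void nal_data_init(struct nal_data *d)
        {
                /* run-time initialisation, before any lock/unlock call */
                rwlock_init(&d->global_lock);
                spin_lock_init(&d->reaper_lock);
        }
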
 
@@ -374,7 +374,7 @@ typedef struct ksock_peer
         cfs_list_t            ksnp_conns;    /* all active connections */
         cfs_list_t            ksnp_routes;   /* routes */
         cfs_list_t            ksnp_tx_queue; /* waiting packets */
-        cfs_spinlock_t        ksnp_lock;     /* serialize, NOT safe in g_lock */
+       spinlock_t            ksnp_lock;        /* serialize, g_lock unsafe */
         cfs_list_t            ksnp_zc_req_list;   /* zero copy requests wait for ACK  */
         cfs_time_t            ksnp_send_keepalive; /* time to send keepalive */
         lnet_ni_t            *ksnp_ni;       /* which network */
@@ -468,13 +468,13 @@ ksocknal_connsock_addref (ksock_conn_t *conn)
 {
         int   rc = -ESHUTDOWN;
 
-        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
-        if (!conn->ksnc_closing) {
-                LASSERT (cfs_atomic_read(&conn->ksnc_sock_refcount) > 0);
-                cfs_atomic_inc(&conn->ksnc_sock_refcount);
-                rc = 0;
-        }
-        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+       read_lock(&ksocknal_data.ksnd_global_lock);
+       if (!conn->ksnc_closing) {
+               LASSERT(cfs_atomic_read(&conn->ksnc_sock_refcount) > 0);
+               cfs_atomic_inc(&conn->ksnc_sock_refcount);
+               rc = 0;
+       }
+       read_unlock(&ksocknal_data.ksnd_global_lock);
 
         return (rc);
 }
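
ksocknal_connsock_addref() above is the usual take-a-reference-unless-dying idiom: holding the read lock keeps out ksocknal_terminate_conn(), which sets ksnc_closing under the write lock, so the increment can never resurrect a socket that is already being torn down. Stripped to its bones (struct conn and glock are illustrative):

        static int conn_sock_addref(struct conn *conn)
        {
                int rc = -ESHUTDOWN;

                read_lock(&glock);
                if (!conn->closing) {           /* writers set this flag */
                        atomic_inc(&conn->sock_refcount);
                        rc = 0;
                }
                read_unlock(&glock);
                return rc;
        }
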
index 72add78..ce762f8 100644
 ksock_tx_t *
 ksocknal_alloc_tx(int type, int size)
 {
-        ksock_tx_t *tx = NULL;
+       ksock_tx_t *tx = NULL;
 
-        if (type == KSOCK_MSG_NOOP) {
-                LASSERT (size == KSOCK_NOOP_TX_SIZE);
+       if (type == KSOCK_MSG_NOOP) {
+               LASSERT(size == KSOCK_NOOP_TX_SIZE);
 
-                /* searching for a noop tx in free list */
-                cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
+               /* searching for a noop tx in free list */
+               spin_lock(&ksocknal_data.ksnd_tx_lock);
 
-                if (!cfs_list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
-                        tx = cfs_list_entry(ksocknal_data.ksnd_idle_noop_txs. \
-                                            next, ksock_tx_t, tx_list);
-                        LASSERT(tx->tx_desc_size == size);
-                        cfs_list_del(&tx->tx_list);
-                }
+               if (!cfs_list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
+                       tx = cfs_list_entry(ksocknal_data.ksnd_idle_noop_txs. \
+                                           next, ksock_tx_t, tx_list);
+                       LASSERT(tx->tx_desc_size == size);
+                       cfs_list_del(&tx->tx_list);
+               }
 
-                cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
+               spin_unlock(&ksocknal_data.ksnd_tx_lock);
         }
 
         if (tx == NULL)
@@ -93,18 +93,18 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
 void
 ksocknal_free_tx (ksock_tx_t *tx)
 {
-        cfs_atomic_dec(&ksocknal_data.ksnd_nactive_txs);
+       cfs_atomic_dec(&ksocknal_data.ksnd_nactive_txs);
 
-        if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
-                /* it's a noop tx */
-                cfs_spin_lock(&ksocknal_data.ksnd_tx_lock);
+       if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
+               /* it's a noop tx */
+               spin_lock(&ksocknal_data.ksnd_tx_lock);
 
-                cfs_list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
+               cfs_list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);
 
-                cfs_spin_unlock(&ksocknal_data.ksnd_tx_lock);
-        } else {
-                LIBCFS_FREE(tx, tx->tx_desc_size);
-        }
+               spin_unlock(&ksocknal_data.ksnd_tx_lock);
+       } else {
+               LIBCFS_FREE(tx, tx->tx_desc_size);
+       }
 }
 
 int
@@ -457,7 +457,7 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
 
         ksocknal_tx_addref(tx);
 
-        cfs_spin_lock(&peer->ksnp_lock);
+       spin_lock(&peer->ksnp_lock);
 
         /* ZC_REQ is going to be pinned to the peer */
         tx->tx_deadline =
@@ -472,33 +472,33 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
 
         cfs_list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);
 
-        cfs_spin_unlock(&peer->ksnp_lock);
+       spin_unlock(&peer->ksnp_lock);
 }
 
 static void
 ksocknal_uncheck_zc_req(ksock_tx_t *tx)
 {
-        ksock_peer_t   *peer = tx->tx_conn->ksnc_peer;
+       ksock_peer_t   *peer = tx->tx_conn->ksnc_peer;
 
-        LASSERT (tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
-        LASSERT (tx->tx_zc_capable);
+       LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
+       LASSERT(tx->tx_zc_capable);
 
-        tx->tx_zc_checked = 0;
+       tx->tx_zc_checked = 0;
 
-        cfs_spin_lock(&peer->ksnp_lock);
+       spin_lock(&peer->ksnp_lock);
 
-        if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
-                /* Not waiting for an ACK */
-                cfs_spin_unlock(&peer->ksnp_lock);
-                return;
-        }
+       if (tx->tx_msg.ksm_zc_cookies[0] == 0) {
+               /* Not waiting for an ACK */
+               spin_unlock(&peer->ksnp_lock);
+               return;
+       }
 
-        tx->tx_msg.ksm_zc_cookies[0] = 0;
-        cfs_list_del(&tx->tx_zc_list);
+       tx->tx_msg.ksm_zc_cookies[0] = 0;
+       cfs_list_del(&tx->tx_zc_list);
 
-        cfs_spin_unlock(&peer->ksnp_lock);
+       spin_unlock(&peer->ksnp_lock);
 
-        ksocknal_tx_decref(tx);
+       ksocknal_tx_decref(tx);
 }
 
 int
@@ -532,7 +532,7 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
                               counter, conn, cfs_atomic_read(&libcfs_kmemory));
 
                 /* Queue on ksnd_enomem_conns for retry after a timeout */
-                cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+               spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
                 /* enomem list takes over scheduler's ref... */
                 LASSERT (conn->ksnc_tx_scheduled);
@@ -543,7 +543,7 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
                                    ksocknal_data.ksnd_reaper_waketime))
                         cfs_waitq_signal (&ksocknal_data.ksnd_reaper_waitq);
 
-                cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+               spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
                 return (rc);
         }
 
@@ -594,13 +594,13 @@ ksocknal_launch_connection_locked (ksock_route_t *route)
         route->ksnr_scheduled = 1;              /* scheduling conn for connd */
         ksocknal_route_addref(route);           /* extra ref for connd */
 
-        cfs_spin_lock_bh (&ksocknal_data.ksnd_connd_lock);
+       spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
 
-        cfs_list_add_tail (&route->ksnr_connd_list,
-                           &ksocknal_data.ksnd_connd_routes);
-        cfs_waitq_signal (&ksocknal_data.ksnd_connd_waitq);
+       cfs_list_add_tail(&route->ksnr_connd_list,
+                         &ksocknal_data.ksnd_connd_routes);
+       cfs_waitq_signal(&ksocknal_data.ksnd_connd_waitq);
 
-        cfs_spin_unlock_bh (&ksocknal_data.ksnd_connd_lock);
+       spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
 }
 
 void
@@ -729,7 +729,7 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
          * but they're used inside spinlocks a lot.
          */
         bufnob = libcfs_sock_wmem_queued(conn->ksnc_sock);
-        cfs_spin_lock_bh (&sched->kss_lock);
+       spin_lock_bh(&sched->kss_lock);
 
         if (cfs_list_empty(&conn->ksnc_tx_queue) && bufnob == 0) {
                 /* First packet starts the timeout */
@@ -775,7 +775,7 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
                 cfs_waitq_signal (&sched->kss_waitq);
         }
 
-        cfs_spin_unlock_bh (&sched->kss_lock);
+       spin_unlock_bh(&sched->kss_lock);
 }
 
 
@@ -839,7 +839,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 {
         ksock_peer_t     *peer;
         ksock_conn_t     *conn;
-        cfs_rwlock_t     *g_lock;
+       rwlock_t     *g_lock;
         int               retry;
         int               rc;
 
@@ -848,7 +848,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
         g_lock = &ksocknal_data.ksnd_global_lock;
 
         for (retry = 0;; retry = 1) {
-                cfs_read_lock (g_lock);
+               read_lock(g_lock);
                 peer = ksocknal_find_peer_locked(ni, id);
                 if (peer != NULL) {
                         if (ksocknal_find_connectable_route_locked(peer) == NULL) {
@@ -858,22 +858,22 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
                                          * connecting and I do have an actual
                                          * connection... */
                                         ksocknal_queue_tx_locked (tx, conn);
-                                        cfs_read_unlock (g_lock);
+                                       read_unlock(g_lock);
                                         return (0);
                                 }
                         }
                 }
 
                 /* I'll need a write lock... */
-                cfs_read_unlock (g_lock);
+               read_unlock(g_lock);
 
-                cfs_write_lock_bh (g_lock);
+               write_lock_bh(g_lock);
 
                 peer = ksocknal_find_peer_locked(ni, id);
                 if (peer != NULL)
                         break;
 
-                cfs_write_unlock_bh (g_lock);
+               write_unlock_bh(g_lock);
 
                 if ((id.pid & LNET_PID_USERFLAG) != 0) {
                         CERROR("Refusing to create a connection to "
@@ -902,7 +902,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
         if (conn != NULL) {
                 /* Connection exists; queue message on it */
                 ksocknal_queue_tx_locked (tx, conn);
-                cfs_write_unlock_bh (g_lock);
+               write_unlock_bh(g_lock);
                 return (0);
         }
 
@@ -914,11 +914,11 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
 
                 /* Queue the message until a connection is established */
                 cfs_list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue);
-                cfs_write_unlock_bh (g_lock);
+               write_unlock_bh(g_lock);
                 return 0;
         }
 
-        cfs_write_unlock_bh (g_lock);
+       write_unlock_bh(g_lock);
 
         /* NB Routes may be ignored if connections to them failed recently */
         CNETERR("No usable routes to %s\n", libcfs_id2str(id));
@@ -1014,18 +1014,18 @@ ksocknal_thread_start (int (*fn)(void *arg), void *arg)
         if (pid < 0)
                 return ((int)pid);
 
-        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+       write_lock_bh(&ksocknal_data.ksnd_global_lock);
         ksocknal_data.ksnd_nthreads++;
-        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+       write_unlock_bh(&ksocknal_data.ksnd_global_lock);
         return (0);
 }
 
 void
 ksocknal_thread_fini (void)
 {
-        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+       write_lock_bh(&ksocknal_data.ksnd_global_lock);
         ksocknal_data.ksnd_nthreads--;
-        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+       write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 }
 
 int
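
A side note on ksocknal_thread_start()/ksocknal_thread_fini() above: they bracket a bare integer increment and decrement with the global write lock, which is correct but heavy for a single counter. Code written against the plain kernel API from scratch would more likely use an atomic_t and leave ksnd_global_lock out of it entirely; a sketch of that alternative, not something this patch changes:

        static atomic_t nthreads = ATOMIC_INIT(0);

        /* thread start */
        atomic_inc(&nthreads);

        /* thread exit */
        atomic_dec(&nthreads);

        /* shutdown can then poll atomic_read(&nthreads) == 0 */
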
@@ -1352,7 +1352,7 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
 
         LASSERT (conn->ksnc_rx_scheduled);
 
-        cfs_spin_lock_bh (&sched->kss_lock);
+       spin_lock_bh(&sched->kss_lock);
 
         switch (conn->ksnc_rx_state) {
         case SOCKNAL_RX_PARSE_WAIT:
@@ -1368,24 +1368,24 @@ ksocknal_recv (lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
 
         conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;
 
-        cfs_spin_unlock_bh (&sched->kss_lock);
-        ksocknal_conn_decref(conn);
-        return (0);
+       spin_unlock_bh(&sched->kss_lock);
+       ksocknal_conn_decref(conn);
+       return 0;
 }
 
 static inline int
 ksocknal_sched_cansleep(ksock_sched_t *sched)
 {
-        int           rc;
+       int           rc;
 
-        cfs_spin_lock_bh (&sched->kss_lock);
+       spin_lock_bh(&sched->kss_lock);
 
-        rc = (!ksocknal_data.ksnd_shuttingdown &&
-              cfs_list_empty(&sched->kss_rx_conns) &&
-              cfs_list_empty(&sched->kss_tx_conns));
+       rc = (!ksocknal_data.ksnd_shuttingdown &&
+             cfs_list_empty(&sched->kss_rx_conns) &&
+             cfs_list_empty(&sched->kss_tx_conns));
 
-        cfs_spin_unlock_bh (&sched->kss_lock);
-        return (rc);
+       spin_unlock_bh(&sched->kss_lock);
+       return rc;
 }
 
 int ksocknal_scheduler(void *arg)
@@ -1414,7 +1414,7 @@ int ksocknal_scheduler(void *arg)
                       name, info->ksi_cpt, rc);
        }
 
-        cfs_spin_lock_bh (&sched->kss_lock);
+       spin_lock_bh(&sched->kss_lock);
 
         while (!ksocknal_data.ksnd_shuttingdown) {
                 int did_something = 0;
@@ -1434,11 +1434,11 @@ int ksocknal_scheduler(void *arg)
                          * data_ready can set it any time after we release
                          * kss_lock. */
                         conn->ksnc_rx_ready = 0;
-                        cfs_spin_unlock_bh (&sched->kss_lock);
+                       spin_unlock_bh(&sched->kss_lock);
 
-                        rc = ksocknal_process_receive(conn);
+                       rc = ksocknal_process_receive(conn);
 
-                        cfs_spin_lock_bh (&sched->kss_lock);
+                       spin_lock_bh(&sched->kss_lock);
 
                         /* I'm the only one that can clear this flag */
                         LASSERT(conn->ksnc_rx_scheduled);
@@ -1496,7 +1496,7 @@ int ksocknal_scheduler(void *arg)
                          * write_space can set it any time after we release
                          * kss_lock. */
                         conn->ksnc_tx_ready = 0;
-                        cfs_spin_unlock_bh (&sched->kss_lock);
+                       spin_unlock_bh(&sched->kss_lock);
 
                         if (!cfs_list_empty(&zlist)) {
                                 /* free zombie noop txs, it's fast because 
@@ -1508,14 +1508,14 @@ int ksocknal_scheduler(void *arg)
 
                         if (rc == -ENOMEM || rc == -EAGAIN) {
                                 /* Incomplete send: replace tx on HEAD of tx_queue */
-                                cfs_spin_lock_bh (&sched->kss_lock);
-                                cfs_list_add (&tx->tx_list,
-                                              &conn->ksnc_tx_queue);
-                        } else {
-                                /* Complete send; tx -ref */
-                                ksocknal_tx_decref (tx);
-
-                                cfs_spin_lock_bh (&sched->kss_lock);
+                               spin_lock_bh(&sched->kss_lock);
+                               cfs_list_add(&tx->tx_list,
+                                            &conn->ksnc_tx_queue);
+                       } else {
+                               /* Complete send; tx -ref */
+                               ksocknal_tx_decref(tx);
+
+                               spin_lock_bh(&sched->kss_lock);
                                 /* assume space for more */
                                 conn->ksnc_tx_ready = 1;
                         }
@@ -1538,7 +1538,7 @@ int ksocknal_scheduler(void *arg)
                 }
                 if (!did_something ||           /* nothing to do */
                     ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
-                        cfs_spin_unlock_bh (&sched->kss_lock);
+                       spin_unlock_bh(&sched->kss_lock);
 
                         nloops = 0;
 
@@ -1551,13 +1551,13 @@ int ksocknal_scheduler(void *arg)
                                 cfs_cond_resched();
                         }
 
-                        cfs_spin_lock_bh (&sched->kss_lock);
-                }
-        }
+                       spin_lock_bh(&sched->kss_lock);
+               }
+       }
 
-        cfs_spin_unlock_bh (&sched->kss_lock);
-        ksocknal_thread_fini ();
-        return (0);
+       spin_unlock_bh(&sched->kss_lock);
+       ksocknal_thread_fini();
+       return 0;
 }
 
 /*
@@ -1566,12 +1566,12 @@ int ksocknal_scheduler(void *arg)
  */
 void ksocknal_read_callback (ksock_conn_t *conn)
 {
-        ksock_sched_t *sched;
-        ENTRY;
+       ksock_sched_t *sched;
+       ENTRY;
 
-        sched = conn->ksnc_scheduler;
+       sched = conn->ksnc_scheduler;
 
-        cfs_spin_lock_bh (&sched->kss_lock);
+       spin_lock_bh(&sched->kss_lock);
 
         conn->ksnc_rx_ready = 1;
 
@@ -1584,9 +1584,9 @@ void ksocknal_read_callback (ksock_conn_t *conn)
 
                 cfs_waitq_signal (&sched->kss_waitq);
         }
-        cfs_spin_unlock_bh (&sched->kss_lock);
+       spin_unlock_bh(&sched->kss_lock);
 
-        EXIT;
+       EXIT;
 }
 
 /*
@@ -1595,12 +1595,12 @@ void ksocknal_read_callback (ksock_conn_t *conn)
  */
 void ksocknal_write_callback (ksock_conn_t *conn)
 {
-        ksock_sched_t *sched;
-        ENTRY;
+       ksock_sched_t *sched;
+       ENTRY;
 
-        sched = conn->ksnc_scheduler;
+       sched = conn->ksnc_scheduler;
 
-        cfs_spin_lock_bh (&sched->kss_lock);
+       spin_lock_bh(&sched->kss_lock);
 
         conn->ksnc_tx_ready = 1;
 
@@ -1615,9 +1615,9 @@ void ksocknal_write_callback (ksock_conn_t *conn)
                 cfs_waitq_signal (&sched->kss_waitq);
         }
 
-        cfs_spin_unlock_bh (&sched->kss_lock);
+       spin_unlock_bh(&sched->kss_lock);
 
-        EXIT;
+       EXIT;
 }
 
 ksock_proto_t *
@@ -1864,7 +1864,7 @@ ksocknal_connect (ksock_route_t *route)
         deadline = cfs_time_add(cfs_time_current(),
                                 cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
 
-        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+       write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
         LASSERT (route->ksnr_scheduled);
         LASSERT (!route->ksnr_connecting);
@@ -1904,7 +1904,7 @@ ksocknal_connect (ksock_route_t *route)
                         type = SOCKLND_CONN_BULK_OUT;
                 }
 
-                cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+               write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
                 if (cfs_time_aftereq(cfs_time_current(), deadline)) {
                         rc = -ETIMEDOUT;
@@ -1935,7 +1935,7 @@ ksocknal_connect (ksock_route_t *route)
                         CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
                                libcfs_nid2str(peer->ksnp_id.nid));
 
-                cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+               write_lock_bh(&ksocknal_data.ksnd_global_lock);
         }
 
         route->ksnr_scheduled = 0;
@@ -1960,11 +1960,11 @@ ksocknal_connect (ksock_route_t *route)
                 ksocknal_launch_connection_locked(route);
         }
 
-        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+       write_unlock_bh(&ksocknal_data.ksnd_global_lock);
         return retry_later;
 
  failed:
-        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+       write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
         route->ksnr_scheduled = 0;
         route->ksnr_connecting = 0;
@@ -2007,7 +2007,7 @@ ksocknal_connect (ksock_route_t *route)
                 cfs_list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
         }
 #endif
-        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+       write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
         ksocknal_peer_failed(peer);
         ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1);
@@ -2057,12 +2057,12 @@ ksocknal_connd_check_start(long sec, long *timeout)
 
         ksocknal_data.ksnd_connd_starting_stamp = sec;
         ksocknal_data.ksnd_connd_starting++;
-        cfs_spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
+       spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
 
-        /* NB: total is the next id */
-        rc = ksocknal_thread_start(ksocknal_connd, (void *)((long)total));
+       /* NB: total is the next id */
+       rc = ksocknal_thread_start(ksocknal_connd, (void *)((long)total));
 
-        cfs_spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
+       spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
         if (rc == 0)
                 return 1;
 
@@ -2144,7 +2144,7 @@ ksocknal_connd_get_route_locked(signed long *timeout_p)
 int
 ksocknal_connd (void *arg)
 {
-        cfs_spinlock_t    *connd_lock = &ksocknal_data.ksnd_connd_lock;
+       spinlock_t    *connd_lock = &ksocknal_data.ksnd_connd_lock;
         long               id = (long)(long_ptr_t)arg;
         char               name[16];
         ksock_connreq_t   *cr;
@@ -2158,7 +2158,7 @@ ksocknal_connd (void *arg)
 
         cfs_waitlink_init (&wait);
 
-        cfs_spin_lock_bh (connd_lock);
+       spin_lock_bh(connd_lock);
 
         LASSERT(ksocknal_data.ksnd_connd_starting > 0);
         ksocknal_data.ksnd_connd_starting--;
@@ -2187,15 +2187,15 @@ ksocknal_connd (void *arg)
                                             next, ksock_connreq_t, ksncr_list);
 
                         cfs_list_del(&cr->ksncr_list);
-                        cfs_spin_unlock_bh(connd_lock);
-                        dropped_lock = 1;
+                       spin_unlock_bh(connd_lock);
+                       dropped_lock = 1;
 
-                        ksocknal_create_conn(cr->ksncr_ni, NULL,
-                                             cr->ksncr_sock, SOCKLND_CONN_NONE);
-                        lnet_ni_decref(cr->ksncr_ni);
-                        LIBCFS_FREE(cr, sizeof(*cr));
+                       ksocknal_create_conn(cr->ksncr_ni, NULL,
+                                            cr->ksncr_sock, SOCKLND_CONN_NONE);
+                       lnet_ni_decref(cr->ksncr_ni);
+                       LIBCFS_FREE(cr, sizeof(*cr));
 
-                        cfs_spin_lock_bh(connd_lock);
+                       spin_lock_bh(connd_lock);
                 }
 
                 /* Only handle an outgoing connection request if there
@@ -2208,7 +2208,7 @@ ksocknal_connd (void *arg)
                 if (route != NULL) {
                         cfs_list_del (&route->ksnr_connd_list);
                         ksocknal_data.ksnd_connd_connecting++;
-                        cfs_spin_unlock_bh(connd_lock);
+                       spin_unlock_bh(connd_lock);
                         dropped_lock = 1;
 
                         if (ksocknal_connect(route)) {
@@ -2225,38 +2225,37 @@ ksocknal_connd (void *arg)
 
                         ksocknal_route_decref(route);
 
-                        cfs_spin_lock_bh(connd_lock);
-                        ksocknal_data.ksnd_connd_connecting--;
-                }
-
-                if (dropped_lock) {
-                        if (++nloops < SOCKNAL_RESCHED)
-                                continue;
-                        cfs_spin_unlock_bh(connd_lock);
-                        nloops = 0;
-                        cfs_cond_resched();
-                        cfs_spin_lock_bh(connd_lock);
-                        continue;
-                }
-
-                /* Nothing to do for 'timeout'  */
-                cfs_set_current_state (CFS_TASK_INTERRUPTIBLE);
-                cfs_waitq_add_exclusive (&ksocknal_data.ksnd_connd_waitq,
-                                         &wait);
-                cfs_spin_unlock_bh(connd_lock);
-
-                nloops = 0;
-                cfs_waitq_timedwait (&wait, CFS_TASK_INTERRUPTIBLE, timeout);
-
-                cfs_set_current_state (CFS_TASK_RUNNING);
-                cfs_waitq_del (&ksocknal_data.ksnd_connd_waitq, &wait);
-                cfs_spin_lock_bh(connd_lock);
-        }
-        ksocknal_data.ksnd_connd_running--;
-        cfs_spin_unlock_bh(connd_lock);
+                       spin_lock_bh(connd_lock);
+                       ksocknal_data.ksnd_connd_connecting--;
+               }
+
+               if (dropped_lock) {
+                       if (++nloops < SOCKNAL_RESCHED)
+                               continue;
+                       spin_unlock_bh(connd_lock);
+                       nloops = 0;
+                       cfs_cond_resched();
+                       spin_lock_bh(connd_lock);
+                       continue;
+               }
+
+               /* Nothing to do for 'timeout'  */
+               cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
+               cfs_waitq_add_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
+               spin_unlock_bh(connd_lock);
+
+               nloops = 0;
+               cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
+
+               cfs_set_current_state(CFS_TASK_RUNNING);
+               cfs_waitq_del(&ksocknal_data.ksnd_connd_waitq, &wait);
+               spin_lock_bh(connd_lock);
+       }
+       ksocknal_data.ksnd_connd_running--;
+       spin_unlock_bh(connd_lock);
 
-        ksocknal_thread_fini ();
-        return (0);
+       ksocknal_thread_fini();
+       return 0;
 }
 
 ksock_conn_t *
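
The connd main loop above still goes through cfs_waitq_add_exclusive()/cfs_waitq_timedwait() and friends; this patch converts only the locking primitives and leaves the wait-queue wrappers for a separate cleanup. For reference, their plain-kernel equivalents would look roughly like this (connd_waitq and connd_lock stand in for the ksocknal_data fields):

        static DECLARE_WAIT_QUEUE_HEAD(connd_waitq);
        static DEFINE_SPINLOCK(connd_lock);

        static void connd_sleep(long timeout)
        {
                DECLARE_WAITQUEUE(wait, current);

                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue_exclusive(&connd_waitq, &wait);
                spin_unlock_bh(&connd_lock);

                schedule_timeout(timeout);      /* wake-up or timeout */

                set_current_state(TASK_RUNNING);
                remove_wait_queue(&connd_waitq, &wait);
                spin_lock_bh(&connd_lock);
        }
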
@@ -2350,7 +2349,7 @@ ksocknal_flush_stale_txs(ksock_peer_t *peer)
         ksock_tx_t        *tx;
         CFS_LIST_HEAD      (stale_txs);
 
-        cfs_write_lock_bh (&ksocknal_data.ksnd_global_lock);
+       write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
         while (!cfs_list_empty (&peer->ksnp_tx_queue)) {
                 tx = cfs_list_entry (peer->ksnp_tx_queue.next,
@@ -2364,7 +2363,7 @@ ksocknal_flush_stale_txs(ksock_peer_t *peer)
                 cfs_list_add_tail (&tx->tx_list, &stale_txs);
         }
 
-        cfs_write_unlock_bh (&ksocknal_data.ksnd_global_lock);
+       write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
         ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1);
 }
@@ -2400,34 +2399,34 @@ ksocknal_send_keepalive_locked(ksock_peer_t *peer)
         if (conn != NULL) {
                 sched = conn->ksnc_scheduler;
 
-                cfs_spin_lock_bh (&sched->kss_lock);
-                if (!cfs_list_empty(&conn->ksnc_tx_queue)) {
-                        cfs_spin_unlock_bh(&sched->kss_lock);
-                        /* there is a queued ACK, don't need keepalive */
-                        return 0;
-                }
+               spin_lock_bh(&sched->kss_lock);
+               if (!cfs_list_empty(&conn->ksnc_tx_queue)) {
+                       spin_unlock_bh(&sched->kss_lock);
+                       /* there is a queued ACK, don't need keepalive */
+                       return 0;
+               }
 
-                cfs_spin_unlock_bh(&sched->kss_lock);
-        }
+               spin_unlock_bh(&sched->kss_lock);
+       }
 
-        cfs_read_unlock(&ksocknal_data.ksnd_global_lock);
+       read_unlock(&ksocknal_data.ksnd_global_lock);
 
-        /* cookie = 1 is reserved for keepalive PING */
-        tx = ksocknal_alloc_tx_noop(1, 1);
-        if (tx == NULL) {
-                cfs_read_lock(&ksocknal_data.ksnd_global_lock);
-                return -ENOMEM;
-        }
+       /* cookie = 1 is reserved for keepalive PING */
+       tx = ksocknal_alloc_tx_noop(1, 1);
+       if (tx == NULL) {
+               read_lock(&ksocknal_data.ksnd_global_lock);
+               return -ENOMEM;
+       }
 
-        if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
-                cfs_read_lock(&ksocknal_data.ksnd_global_lock);
-                return 1;
-        }
+       if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) {
+               read_lock(&ksocknal_data.ksnd_global_lock);
+               return 1;
+       }
 
-        ksocknal_free_tx(tx);
-        cfs_read_lock(&ksocknal_data.ksnd_global_lock);
+       ksocknal_free_tx(tx);
+       read_lock(&ksocknal_data.ksnd_global_lock);
 
-        return -EIO;
+       return -EIO;
 }
 
 
@@ -2443,7 +2442,7 @@ ksocknal_check_peer_timeouts (int idx)
         /* NB. We expect to have a look at all the peers and not find any
          * connections to time out, so we just use a shared lock while we
          * take a look... */
-        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+       read_lock(&ksocknal_data.ksnd_global_lock);
 
         cfs_list_for_each_entry_typed(peer, peers, ksock_peer_t, ksnp_list) {
                 cfs_time_t  deadline = 0;
@@ -2451,14 +2450,14 @@ ksocknal_check_peer_timeouts (int idx)
                 int         n     = 0;
 
                 if (ksocknal_send_keepalive_locked(peer) != 0) {
-                        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+                       read_unlock(&ksocknal_data.ksnd_global_lock);
                         goto again;
                 }
 
                 conn = ksocknal_find_timed_out_conn (peer);
 
                 if (conn != NULL) {
-                        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+                       read_unlock(&ksocknal_data.ksnd_global_lock);
 
                         ksocknal_close_conn_and_siblings (conn, -ETIMEDOUT);
 
@@ -2480,7 +2479,7 @@ ksocknal_check_peer_timeouts (int idx)
                                              tx->tx_deadline)) {
 
                                 ksocknal_peer_addref(peer);
-                                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+                               read_unlock(&ksocknal_data.ksnd_global_lock);
 
                                 ksocknal_flush_stale_txs(peer);
 
@@ -2492,7 +2491,7 @@ ksocknal_check_peer_timeouts (int idx)
                 if (cfs_list_empty(&peer->ksnp_zc_req_list))
                         continue;
 
-                cfs_spin_lock(&peer->ksnp_lock);
+               spin_lock(&peer->ksnp_lock);
                 cfs_list_for_each_entry_typed(tx, &peer->ksnp_zc_req_list,
                                               ksock_tx_t, tx_zc_list) {
                         if (!cfs_time_aftereq(cfs_time_current(),
@@ -2505,7 +2504,7 @@ ksocknal_check_peer_timeouts (int idx)
                 }
 
                 if (n == 0) {
-                        cfs_spin_unlock(&peer->ksnp_lock);
+                       spin_unlock(&peer->ksnp_lock);
                         continue;
                 }
 
@@ -2516,8 +2515,8 @@ ksocknal_check_peer_timeouts (int idx)
                 conn     = tx->tx_conn;
                 ksocknal_conn_addref(conn);
 
-                cfs_spin_unlock(&peer->ksnp_lock);
-                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+               spin_unlock(&peer->ksnp_lock);
+               read_unlock(&ksocknal_data.ksnd_global_lock);
 
                 CERROR("Total %d stale ZC_REQs for peer %s detected; the "
                        "oldest(%p) timed out %ld secs ago, "
@@ -2531,7 +2530,7 @@ ksocknal_check_peer_timeouts (int idx)
                 goto again;
         }
 
-        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+       read_unlock(&ksocknal_data.ksnd_global_lock);
 }
 
 int
@@ -2553,7 +2552,7 @@ ksocknal_reaper (void *arg)
         CFS_INIT_LIST_HEAD(&enomem_conns);
         cfs_waitlink_init (&wait);
 
-        cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+       spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
 
         while (!ksocknal_data.ksnd_shuttingdown) {
 
@@ -2563,12 +2562,12 @@ ksocknal_reaper (void *arg)
                                                ksock_conn_t, ksnc_list);
                         cfs_list_del (&conn->ksnc_list);
 
-                        cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+                       spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 
-                        ksocknal_terminate_conn (conn);
-                        ksocknal_conn_decref(conn);
+                       ksocknal_terminate_conn(conn);
+                       ksocknal_conn_decref(conn);
 
-                        cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+                       spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
                         continue;
                 }
 
@@ -2577,11 +2576,11 @@ ksocknal_reaper (void *arg)
                                                next, ksock_conn_t, ksnc_list);
                         cfs_list_del (&conn->ksnc_list);
 
-                        cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+                       spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 
-                        ksocknal_destroy_conn (conn);
+                       ksocknal_destroy_conn(conn);
 
-                        cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
+                       spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
                         continue;
                 }
 
@@ -2591,7 +2590,7 @@ ksocknal_reaper (void *arg)
                         cfs_list_del_init(&ksocknal_data.ksnd_enomem_conns);
                 }
 
-                cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+               spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 
                 /* reschedule all the connections that stalled with ENOMEM... */
                 nenomem_conns = 0;
@@ -2602,15 +2601,15 @@ ksocknal_reaper (void *arg)
 
                         sched = conn->ksnc_scheduler;
 
-                        cfs_spin_lock_bh (&sched->kss_lock);
+                       spin_lock_bh(&sched->kss_lock);
 
-                        LASSERT (conn->ksnc_tx_scheduled);
-                        conn->ksnc_tx_ready = 1;
-                        cfs_list_add_tail(&conn->ksnc_tx_list,
-                                          &sched->kss_tx_conns);
-                        cfs_waitq_signal (&sched->kss_waitq);
+                       LASSERT(conn->ksnc_tx_scheduled);
+                       conn->ksnc_tx_ready = 1;
+                       cfs_list_add_tail(&conn->ksnc_tx_list,
+                                         &sched->kss_tx_conns);
+                       cfs_waitq_signal(&sched->kss_waitq);
 
-                        cfs_spin_unlock_bh (&sched->kss_lock);
+                       spin_unlock_bh(&sched->kss_lock);
                         nenomem_conns++;
                 }
 
@@ -2664,11 +2663,11 @@ ksocknal_reaper (void *arg)
                 cfs_set_current_state (CFS_TASK_RUNNING);
                 cfs_waitq_del (&ksocknal_data.ksnd_reaper_waitq, &wait);
 
-                cfs_spin_lock_bh (&ksocknal_data.ksnd_reaper_lock);
-        }
+               spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
+       }
 
-        cfs_spin_unlock_bh (&ksocknal_data.ksnd_reaper_lock);
+       spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
 
-        ksocknal_thread_fini ();
-        return (0);
+       ksocknal_thread_fini();
+       return 0;
 }
index 445ea61..04760f2 100644
@@ -1064,7 +1064,7 @@ ksocknal_data_ready (struct sock *sk, int n)
 
         /* interleave correctly with closing sockets... */
         LASSERT(!in_irq());
-        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+       read_lock(&ksocknal_data.ksnd_global_lock);
 
         conn = sk->sk_user_data;
         if (conn == NULL) {             /* raced with ksocknal_terminate_conn */
@@ -1073,7 +1073,7 @@ ksocknal_data_ready (struct sock *sk, int n)
         } else
                 ksocknal_read_callback(conn);
 
-        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+       read_unlock(&ksocknal_data.ksnd_global_lock);
 
         EXIT;
 }
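
ksocknal_data_ready() runs from the socket's sk_data_ready hook, i.e. in softirq context (the LASSERT(!in_irq()) above only rules out hard-interrupt context). That is the reason for the _bh discipline on ksnd_global_lock throughout this patch: readers never block other readers, so both softirq and process-context code may use plain read_lock(), but every writer must use write_lock_bh(), because a softirq reader firing on the same CPU while the write lock is held would spin forever. Schematically:

        /* process context, writer: bottom halves off so the data_ready
         * softirq cannot preempt us and spin on the held write lock */
        write_lock_bh(&glock);
        /* ... modify peer/conn tables ... */
        write_unlock_bh(&glock);

        /* softirq context, sk_data_ready callback: a shared lock is
         * enough, and BHs are already disabled here */
        read_lock(&glock);
        /* ... look up the conn, mark it rx-ready ... */
        read_unlock(&glock);
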
@@ -1087,7 +1087,7 @@ ksocknal_write_space (struct sock *sk)
 
         /* interleave correctly with closing sockets... */
         LASSERT(!in_irq());
-        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+       read_lock(&ksocknal_data.ksnd_global_lock);
 
         conn = sk->sk_user_data;
         wspace = SOCKNAL_WSPACE(sk);
@@ -1106,7 +1106,7 @@ ksocknal_write_space (struct sock *sk)
                 LASSERT (sk->sk_write_space != &ksocknal_write_space);
                 sk->sk_write_space (sk);
 
-                cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+               read_unlock(&ksocknal_data.ksnd_global_lock);
                 return;
         }
 
@@ -1120,7 +1120,7 @@ ksocknal_write_space (struct sock *sk)
                 clear_bit (SOCK_NOSPACE, &sk->sk_socket->flags);
         }
 
-        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+       read_unlock(&ksocknal_data.ksnd_global_lock);
 }
 
 void
@@ -1159,11 +1159,11 @@ ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
 int
 ksocknal_lib_memory_pressure(ksock_conn_t *conn)
 {
-        int            rc = 0;
-        ksock_sched_t *sched;
-        
-        sched = conn->ksnc_scheduler;
-        cfs_spin_lock_bh (&sched->kss_lock);
+       int            rc = 0;
+       ksock_sched_t *sched;
+
+       sched = conn->ksnc_scheduler;
+       spin_lock_bh(&sched->kss_lock);
 
         if (!SOCK_TEST_NOSPACE(conn->ksnc_sock) &&
             !conn->ksnc_tx_ready) {
@@ -1178,7 +1178,7 @@ ksocknal_lib_memory_pressure(ksock_conn_t *conn)
                 rc = -ENOMEM;
         }
 
-        cfs_spin_unlock_bh (&sched->kss_lock);
+       spin_unlock_bh(&sched->kss_lock);
 
-        return rc;
+       return rc;
 }
index 08461e7..f325c99 100644
@@ -542,7 +542,7 @@ ksocknal_lib_push_conn (ksock_conn_t *conn)
 
         ks_get_tconn(tconn);
 
-        cfs_spin_lock(&tconn->kstc_lock);
+       spin_lock(&tconn->kstc_lock);
         if (tconn->kstc_type == kstt_sender) {
                 nagle = tconn->sender.kstc_info.nagle;
                 tconn->sender.kstc_info.nagle = 0;
@@ -552,7 +552,7 @@ ksocknal_lib_push_conn (ksock_conn_t *conn)
                 tconn->child.kstc_info.nagle = 0;
         }
 
-        cfs_spin_unlock(&tconn->kstc_lock);
+       spin_unlock(&tconn->kstc_lock);
 
         val = 1;
         rc = ks_set_tcp_option(
@@ -563,15 +563,15 @@ ksocknal_lib_push_conn (ksock_conn_t *conn)
                     );
 
         LASSERT (rc == 0);
-        cfs_spin_lock(&tconn->kstc_lock);
-
-        if (tconn->kstc_type == kstt_sender) {
-                tconn->sender.kstc_info.nagle = nagle;
-        } else {
-                LASSERT(tconn->kstc_type == kstt_child);
-                tconn->child.kstc_info.nagle = nagle;
-        }
-        cfs_spin_unlock(&tconn->kstc_lock);
+       spin_lock(&tconn->kstc_lock);
+
+       if (tconn->kstc_type == kstt_sender) {
+               tconn->sender.kstc_info.nagle = nagle;
+       } else {
+               LASSERT(tconn->kstc_type == kstt_child);
+               tconn->child.kstc_info.nagle = nagle;
+       }
+       spin_unlock(&tconn->kstc_lock);
         ks_put_tconn(tconn);
 }
 
@@ -616,28 +616,28 @@ void ksocknal_schedule_callback(struct socket*sock, int mode)
 {
         ksock_conn_t * conn = (ksock_conn_t *) sock->kstc_conn;
 
-        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+       read_lock(&ksocknal_data.ksnd_global_lock);
         if (mode) {
                 ksocknal_write_callback(conn);
         } else {
                 ksocknal_read_callback(conn);
         }
-        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+       read_unlock(&ksocknal_data.ksnd_global_lock);
 }
 
 void
 ksocknal_tx_fini_callback(ksock_conn_t * conn, ksock_tx_t * tx)
 {
-        /* remove tx/conn from conn's outgoing queue */
-        cfs_spin_lock_bh (&conn->ksnc_scheduler->kss_lock);
-        cfs_list_del(&tx->tx_list);
-        if (cfs_list_empty(&conn->ksnc_tx_queue)) {
-                cfs_list_del (&conn->ksnc_tx_list);
-        }
-        cfs_spin_unlock_bh (&conn->ksnc_scheduler->kss_lock);
+       /* remove tx/conn from conn's outgoing queue */
+       spin_lock_bh(&conn->ksnc_scheduler->kss_lock);
+       cfs_list_del(&tx->tx_list);
+       if (cfs_list_empty(&conn->ksnc_tx_queue))
+               cfs_list_del(&conn->ksnc_tx_list);
+
+       spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
 
-        /* complete send; tx -ref */
-        ksocknal_tx_decref (tx);
+       /* complete send; tx -ref */
+       ksocknal_tx_decref(tx);
 }
 
 void
index f2618b2..c60383e 100644
@@ -358,32 +358,32 @@ ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
 static int
 ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
 {
-        ksock_peer_t   *peer = c->ksnc_peer;
-        ksock_conn_t   *conn;
-        ksock_tx_t     *tx;
-        int             rc;
+       ksock_peer_t   *peer = c->ksnc_peer;
+       ksock_conn_t   *conn;
+       ksock_tx_t     *tx;
+       int             rc;
 
-        cfs_read_lock (&ksocknal_data.ksnd_global_lock);
+       read_lock(&ksocknal_data.ksnd_global_lock);
 
-        conn = ksocknal_find_conn_locked(peer, NULL, !!remote);
-        if (conn != NULL) {
-                ksock_sched_t *sched = conn->ksnc_scheduler;
+       conn = ksocknal_find_conn_locked(peer, NULL, !!remote);
+       if (conn != NULL) {
+               ksock_sched_t *sched = conn->ksnc_scheduler;
 
-                LASSERT (conn->ksnc_proto->pro_queue_tx_zcack != NULL);
+               LASSERT(conn->ksnc_proto->pro_queue_tx_zcack != NULL);
 
-                cfs_spin_lock_bh (&sched->kss_lock);
+               spin_lock_bh(&sched->kss_lock);
 
-                rc = conn->ksnc_proto->pro_queue_tx_zcack(conn, NULL, cookie);
+               rc = conn->ksnc_proto->pro_queue_tx_zcack(conn, NULL, cookie);
 
-                cfs_spin_unlock_bh (&sched->kss_lock);
+               spin_unlock_bh(&sched->kss_lock);
 
-                if (rc) { /* piggybacked */
-                        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
-                        return 0;
-                }
-        }
+               if (rc) { /* piggybacked */
+                       read_unlock(&ksocknal_data.ksnd_global_lock);
+                       return 0;
+               }
+       }
 
-        cfs_read_unlock (&ksocknal_data.ksnd_global_lock);
+       read_unlock(&ksocknal_data.ksnd_global_lock);
 
         /* ACK connection is not ready, or can't piggyback the ACK */
         tx = ksocknal_alloc_tx_noop(cookie, !!remote);
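
The zero-copy ACK path above takes ksnd_global_lock on the read side, so multiple handlers can walk the connection tables concurrently while connection add/remove takes the write side exclusively. A sketch of that reader/writer split with the plain kernel rwlock API (demo_* names are hypothetical):

    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(demo_conn_lock);
    static int demo_nconns;

    static int demo_count_conns(void)
    {
            int n;

            read_lock(&demo_conn_lock);     /* readers may run concurrently */
            n = demo_nconns;
            read_unlock(&demo_conn_lock);
            return n;
    }

    static void demo_add_conn(void)
    {
            write_lock(&demo_conn_lock);    /* writers are exclusive */
            demo_nconns++;
            write_unlock(&demo_conn_lock);
    }
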
@@ -418,7 +418,7 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
                 return count == 1 ? 0 : -EPROTO;
         }
 
-        cfs_spin_lock(&peer->ksnp_lock);
+       spin_lock(&peer->ksnp_lock);
 
         cfs_list_for_each_entry_safe(tx, tmp,
                                      &peer->ksnp_zc_req_list, tx_zc_list) {
@@ -434,7 +434,7 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
                 }
         }
 
-        cfs_spin_unlock(&peer->ksnp_lock);
+       spin_unlock(&peer->ksnp_lock);
 
         while (!cfs_list_empty(&zlist)) {
                 tx = cfs_list_entry(zlist.next, ksock_tx_t, tx_zc_list);
index a28da78..8a5482a 100644
@@ -47,9 +47,9 @@ struct {
         int                   pta_shutdown;
         cfs_socket_t         *pta_sock;
 #ifdef __KERNEL__
-        cfs_completion_t      pta_signal;
+       struct completion       pta_signal;
 #else
-        cfs_mt_completion_t   pta_signal;
+       mt_completion_t         pta_signal;
 #endif
 } lnet_acceptor_state;
 
@@ -68,10 +68,10 @@ lnet_accept_magic(__u32 magic, __u32 constant)
 
 #ifdef __KERNEL__
 
-#define cfs_mt_init_completion(c)     cfs_init_completion(c)
-#define cfs_mt_wait_for_completion(c) cfs_wait_for_completion(c)
-#define cfs_mt_complete(c)            cfs_complete(c)
-#define cfs_mt_fini_completion(c)     cfs_fini_completion(c)
+#define mt_init_completion(c)     init_completion(c)
+#define mt_wait_for_completion(c) wait_for_completion(c)
+#define mt_complete(c)            complete(c)
+#define mt_fini_completion(c)     fini_completion(c)
 
 EXPORT_SYMBOL(lnet_acceptor_port);
 
@@ -431,7 +431,7 @@ lnet_acceptor(void *arg)
 
         /* set init status and unblock parent */
         lnet_acceptor_state.pta_shutdown = rc;
-        cfs_mt_complete(&lnet_acceptor_state.pta_signal);
+       mt_complete(&lnet_acceptor_state.pta_signal);
 
         if (rc != 0)
                 return rc;
@@ -490,7 +490,7 @@ lnet_acceptor(void *arg)
         CDEBUG(D_NET, "Acceptor stopping\n");
 
         /* unblock lnet_acceptor_stop() */
-        cfs_mt_complete(&lnet_acceptor_state.pta_signal);
+       mt_complete(&lnet_acceptor_state.pta_signal);
         return 0;
 }
 
@@ -531,10 +531,10 @@ lnet_acceptor_start(void)
                 return 0;
 #endif
 
-        cfs_mt_init_completion(&lnet_acceptor_state.pta_signal);
+       mt_init_completion(&lnet_acceptor_state.pta_signal);
         rc = accept2secure(accept_type, &secure);
         if (rc <= 0) {
-                cfs_mt_fini_completion(&lnet_acceptor_state.pta_signal);
+               mt_fini_completion(&lnet_acceptor_state.pta_signal);
                 return rc;
         }
 
@@ -544,13 +544,13 @@ lnet_acceptor_start(void)
         rc2 = cfs_create_thread(lnet_acceptor, (void *)(ulong_ptr_t)secure, 0);
         if (rc2 < 0) {
                 CERROR("Can't start acceptor thread: %d\n", rc);
-                cfs_mt_fini_completion(&lnet_acceptor_state.pta_signal);
+               mt_fini_completion(&lnet_acceptor_state.pta_signal);
 
                 return -ESRCH;
         }
 
         /* wait for acceptor to startup */
-        cfs_mt_wait_for_completion(&lnet_acceptor_state.pta_signal);
+       mt_wait_for_completion(&lnet_acceptor_state.pta_signal);
 
         if (!lnet_acceptor_state.pta_shutdown) {
                 /* started OK */
@@ -559,7 +559,7 @@ lnet_acceptor_start(void)
         }
 
         LASSERT (lnet_acceptor_state.pta_sock == NULL);
-        cfs_mt_fini_completion(&lnet_acceptor_state.pta_signal);
+       mt_fini_completion(&lnet_acceptor_state.pta_signal);
 
         return -ENETDOWN;
 }
@@ -574,9 +574,9 @@ lnet_acceptor_stop(void)
         libcfs_sock_abort_accept(lnet_acceptor_state.pta_sock);
 
         /* block until acceptor signals exit */
-        cfs_mt_wait_for_completion(&lnet_acceptor_state.pta_signal);
+       mt_wait_for_completion(&lnet_acceptor_state.pta_signal);
 
-        cfs_mt_fini_completion(&lnet_acceptor_state.pta_signal);
+       mt_fini_completion(&lnet_acceptor_state.pta_signal);
 }
 
 #else /* single-threaded user-space */
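
In the kernel case above, pta_signal is now a plain struct completion used for a startup/shutdown handshake: the acceptor thread records its init status and calls complete(), and the parent returns from wait_for_completion() knowing whether startup succeeded. A minimal sketch of the handshake, using kthread_run() in place of cfs_create_thread() (demo_* names and the thread name are hypothetical; fini_completion() has no Linux equivalent and remains a libcfs helper, so it is omitted here):

    #include <linux/completion.h>
    #include <linux/kthread.h>
    #include <linux/err.h>
    #include <linux/errno.h>

    static struct completion demo_ready;
    static int demo_status;

    static int demo_thread(void *arg)
    {
            demo_status = 0;                /* record init result */
            complete(&demo_ready);          /* unblock the parent */
            /* ... main service loop ... */
            return 0;
    }

    static int demo_start(void)
    {
            init_completion(&demo_ready);
            if (IS_ERR(kthread_run(demo_thread, NULL, "demo_acceptor")))
                    return -ESRCH;
            wait_for_completion(&demo_ready);  /* wait for the child */
            return demo_status;
    }
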
index 0a0b8a9..693011c 100644
@@ -92,10 +92,10 @@ lnet_get_networks(void)
 void
 lnet_init_locks(void)
 {
-       cfs_spin_lock_init(&the_lnet.ln_eq_wait_lock);
+       spin_lock_init(&the_lnet.ln_eq_wait_lock);
        cfs_waitq_init(&the_lnet.ln_eq_waitq);
-       cfs_mutex_init(&the_lnet.ln_lnd_mutex);
-       cfs_mutex_init(&the_lnet.ln_api_mutex);
+       mutex_init(&the_lnet.ln_lnd_mutex);
+       mutex_init(&the_lnet.ln_api_mutex);
 }
 
 void
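
lnet_init_locks() shows the runtime initializers mapping one-for-one: spin_lock_init() and mutex_init() replace their cfs_ counterparts, while the waitqueue wrapper is evidently left for a separate conversion. For reference, a sketch contrasting static definition with runtime initialization (demo_* names are hypothetical):

    #include <linux/spinlock.h>
    #include <linux/mutex.h>

    /* file-scope locks can be defined and initialized in one step */
    static DEFINE_SPINLOCK(demo_slock);
    static DEFINE_MUTEX(demo_mtx);

    struct demo_ctx {
            spinlock_t      dc_lock;
            struct mutex    dc_mutex;
    };

    /* locks embedded in dynamically allocated objects must be
     * initialized at runtime before first use */
    static void demo_ctx_init(struct demo_ctx *ctx)
    {
            spin_lock_init(&ctx->dc_lock);
            mutex_init(&ctx->dc_mutex);
    }
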
index 327c019..95b9b9b 100644
@@ -133,7 +133,7 @@ lnet_ni_alloc(__u32 net, struct cfs_expr_list *el, cfs_list_t *nilist)
         }
 
 #ifdef __KERNEL__
-       cfs_spin_lock_init(&ni->ni_lock);
+       spin_lock_init(&ni->ni_lock);
 #else
 # ifdef HAVE_LIBPTHREAD
        pthread_mutex_init(&ni->ni_lock, NULL);
index 527a28a..c8378d8 100644
@@ -776,7 +776,7 @@ lnet_ptl_setup(struct lnet_portal *ptl, int index)
        CFS_INIT_LIST_HEAD(&ptl->ptl_msg_delayed);
        CFS_INIT_LIST_HEAD(&ptl->ptl_msg_stealing);
 #ifdef __KERNEL__
-       cfs_spin_lock_init(&ptl->ptl_lock);
+       spin_lock_init(&ptl->ptl_lock);
 #else
 # ifdef HAVE_LIBPTHREAD
        pthread_mutex_init(&ptl->ptl_lock, NULL);
index 589bade..e189b2a 100644
@@ -39,7 +39,7 @@ static int config_on_load = 0;
 CFS_MODULE_PARM(config_on_load, "i", int, 0444,
                 "configure network at module load");
 
-static cfs_mutex_t lnet_config_mutex;
+static struct mutex lnet_config_mutex;
 
 int
 lnet_configure (void *arg)
@@ -114,7 +114,7 @@ init_lnet(void)
         int                  rc;
         ENTRY;
 
-        cfs_mutex_init(&lnet_config_mutex);
+       mutex_init(&lnet_config_mutex);
 
         rc = LNetInit();
         if (rc != 0) {
index a380035..f4f185d 100644
@@ -1049,7 +1049,7 @@ lnet_router_checker_start(void)
                 return 0;
 
 #ifdef __KERNEL__
-        cfs_sema_init(&the_lnet.ln_rc_signal, 0);
+       sema_init(&the_lnet.ln_rc_signal, 0);
         /* EQ size doesn't matter; the callback is guaranteed to get every
          * event */
        eqsz = 0;
@@ -1070,7 +1070,7 @@ lnet_router_checker_start(void)
         if (rc < 0) {
                 CERROR("Can't start router checker thread: %d\n", rc);
                 /* block until event callback signals exit */
-                cfs_down(&the_lnet.ln_rc_signal);
+               down(&the_lnet.ln_rc_signal);
                 rc = LNetEQFree(the_lnet.ln_rc_eqh);
                 LASSERT (rc == 0);
                 the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
@@ -1101,7 +1101,7 @@ lnet_router_checker_stop (void)
 
 #ifdef __KERNEL__
        /* block until event callback signals exit */
-       cfs_down(&the_lnet.ln_rc_signal);
+       down(&the_lnet.ln_rc_signal);
 #else
        lnet_router_checker();
 #endif
@@ -1256,7 +1256,7 @@ rescan:
        lnet_prune_rc_data(1); /* wait for UNLINK */
 
        the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
-       cfs_up(&the_lnet.ln_rc_signal);
+       up(&the_lnet.ln_rc_signal);
        /* The unlink event callback will signal final completion */
        return 0;
 }
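
ln_rc_signal is a semaphore initialized to zero and used purely as a one-shot signal: the stopping thread blocks in down() until the router checker calls up() on its way out. A self-contained sketch of the pattern (demo_* names are hypothetical):

    #include <linux/semaphore.h>

    static struct semaphore demo_sig;

    static void demo_setup(void)
    {
            sema_init(&demo_sig, 0);  /* count 0: first down() blocks */
    }

    static void demo_worker_exit(void)
    {
            up(&demo_sig);            /* signal that the worker is done */
    }

    static void demo_stop(void)
    {
            down(&demo_sig);          /* block until the worker signals */
    }
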
index 9c2f39f..d3f45ba 100644
@@ -836,7 +836,7 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data)
                 return -EFAULT;
         }
 
-        cfs_mutex_lock(&console_session.ses_mutex);
+       mutex_lock(&console_session.ses_mutex);
 
         console_session.ses_laststamp = cfs_time_current_sec();
 
@@ -920,7 +920,7 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_data *data)
                              sizeof(lstcon_trans_stat_t)))
                 rc = -EFAULT;
 out:
-        cfs_mutex_unlock(&console_session.ses_mutex);
+       mutex_unlock(&console_session.ses_mutex);
 
         LIBCFS_FREE(buf, data->ioc_plen1);
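
Every console ioctl runs with ses_mutex held, so at most one thread manipulates session state at a time, and every exit path funnels through the single unlock at out:. A minimal sketch of that shape (demo_* names and the trivial command check are hypothetical):

    #include <linux/mutex.h>
    #include <linux/errno.h>

    static DEFINE_MUTEX(demo_ses_mutex);

    static int demo_ioctl(unsigned int cmd)
    {
            int rc = 0;

            mutex_lock(&demo_ses_mutex);
            if (cmd == 0) {                 /* reject bad commands early */
                    rc = -EINVAL;
                    goto out;
            }
            /* ... dispatch with the session state pinned ... */
    out:
            mutex_unlock(&demo_ses_mutex);
            return rc;
    }
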
 
index 1213ef0..2ea6e51 100644
@@ -54,17 +54,17 @@ void lstcon_rpc_stat_reply(lstcon_rpc_trans_t *, srpc_msg_t *,
 static void
 lstcon_rpc_done(srpc_client_rpc_t *rpc)
 {
-        lstcon_rpc_t *crpc = (lstcon_rpc_t *)rpc->crpc_priv;
+       lstcon_rpc_t *crpc = (lstcon_rpc_t *)rpc->crpc_priv;
 
-        LASSERT (crpc != NULL && rpc == crpc->crp_rpc);
-        LASSERT (crpc->crp_posted && !crpc->crp_finished);
+       LASSERT(crpc != NULL && rpc == crpc->crp_rpc);
+       LASSERT(crpc->crp_posted && !crpc->crp_finished);
 
-        cfs_spin_lock(&rpc->crpc_lock);
+       spin_lock(&rpc->crpc_lock);
 
-        if (crpc->crp_trans == NULL) {
-                /* Orphan RPC is not in any transaction, 
-                 * I'm just a poor body and nobody loves me */
-                cfs_spin_unlock(&rpc->crpc_lock);
+       if (crpc->crp_trans == NULL) {
+               /* Orphan RPC is not in any transaction,
+                * I'm just a poor body and nobody loves me */
+               spin_unlock(&rpc->crpc_lock);
 
                 /* release it */
                 lstcon_rpc_put(crpc);
@@ -86,7 +86,7 @@ lstcon_rpc_done(srpc_client_rpc_t *rpc)
         if (cfs_atomic_dec_and_test(&crpc->crp_trans->tas_remaining))
                 cfs_waitq_signal(&crpc->crp_trans->tas_waitq);
 
-        cfs_spin_unlock(&rpc->crpc_lock);
+       spin_unlock(&rpc->crpc_lock);
 }
 
 int
@@ -118,27 +118,26 @@ int
 lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats,
                int bulk_npg, int bulk_len, lstcon_rpc_t **crpcpp)
 {
-        lstcon_rpc_t  *crpc = NULL;
-        int            rc;
+       lstcon_rpc_t  *crpc = NULL;
+       int            rc;
 
-        cfs_spin_lock(&console_session.ses_rpc_lock);
+       spin_lock(&console_session.ses_rpc_lock);
 
-        if (!cfs_list_empty(&console_session.ses_rpc_freelist)) {
-                crpc = cfs_list_entry(console_session.ses_rpc_freelist.next,
-                                      lstcon_rpc_t, crp_link);
-                cfs_list_del_init(&crpc->crp_link);
-        }
+       if (!cfs_list_empty(&console_session.ses_rpc_freelist)) {
+               crpc = cfs_list_entry(console_session.ses_rpc_freelist.next,
+                                     lstcon_rpc_t, crp_link);
+               cfs_list_del_init(&crpc->crp_link);
+       }
 
-        cfs_spin_unlock(&console_session.ses_rpc_lock);
+       spin_unlock(&console_session.ses_rpc_lock);
 
-        if (crpc == NULL) {
-                LIBCFS_ALLOC(crpc, sizeof(*crpc));
-                if (crpc == NULL)
-                        return -ENOMEM;
-        }
+       if (crpc == NULL) {
+               LIBCFS_ALLOC(crpc, sizeof(*crpc));
+               if (crpc == NULL)
+                       return -ENOMEM;
+       }
 
-       rc = lstcon_rpc_init(nd, service, feats,
-                            bulk_npg, bulk_len, 0, crpc);
+       rc = lstcon_rpc_init(nd, service, feats, bulk_npg, bulk_len, 0, crpc);
         if (rc == 0) {
                 *crpcpp = crpc;
                 return 0;
@@ -171,17 +170,17 @@ lstcon_rpc_put(lstcon_rpc_t *crpc)
                memset(crpc, 0, sizeof(*crpc));
                crpc->crp_embedded = 1;
 
-        } else {
-                cfs_spin_lock(&console_session.ses_rpc_lock);
+       } else {
+               spin_lock(&console_session.ses_rpc_lock);
 
-                cfs_list_add(&crpc->crp_link,
-                             &console_session.ses_rpc_freelist);
+               cfs_list_add(&crpc->crp_link,
+                            &console_session.ses_rpc_freelist);
 
-                cfs_spin_unlock(&console_session.ses_rpc_lock);
-        }
+               spin_unlock(&console_session.ses_rpc_lock);
+       }
 
-        /* RPC is not alive now */
-        cfs_atomic_dec(&console_session.ses_rpc_counter);
+       /* RPC is not alive now */
+       cfs_atomic_dec(&console_session.ses_rpc_counter);
 }
 
 void
@@ -270,9 +269,9 @@ lstcon_rpc_trans_prep(cfs_list_t *translist,
         cfs_atomic_set(&trans->tas_remaining, 0);
         cfs_waitq_init(&trans->tas_waitq);
 
-       cfs_spin_lock(&console_session.ses_rpc_lock);
+       spin_lock(&console_session.ses_rpc_lock);
        trans->tas_features = console_session.ses_features;
-       cfs_spin_unlock(&console_session.ses_rpc_lock);
+       spin_unlock(&console_session.ses_rpc_lock);
 
        *transpp = trans;
        return 0;
@@ -296,22 +295,22 @@ lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error)
                                        lstcon_rpc_t, crp_link) {
                 rpc = crpc->crp_rpc;
 
-                cfs_spin_lock(&rpc->crpc_lock);
+               spin_lock(&rpc->crpc_lock);
 
-                if (!crpc->crp_posted || /* not posted */
-                    crpc->crp_stamp != 0) { /* rpc done or aborted already */
-                        if (crpc->crp_stamp == 0) {
-                                crpc->crp_stamp = cfs_time_current();
-                                crpc->crp_status = -EINTR;
-                        }
-                        cfs_spin_unlock(&rpc->crpc_lock);
-                        continue;
-                }
+               if (!crpc->crp_posted || /* not posted */
+                   crpc->crp_stamp != 0) { /* rpc done or aborted already */
+                       if (crpc->crp_stamp == 0) {
+                               crpc->crp_stamp = cfs_time_current();
+                               crpc->crp_status = -EINTR;
+                       }
+                       spin_unlock(&rpc->crpc_lock);
+                       continue;
+               }
 
-                crpc->crp_stamp  = cfs_time_current();
-                crpc->crp_status = error;
+               crpc->crp_stamp  = cfs_time_current();
+               crpc->crp_status = error;
 
-                cfs_spin_unlock(&rpc->crpc_lock);
+               spin_unlock(&rpc->crpc_lock);
 
                 sfw_abort_rpc(rpc);
 
@@ -360,7 +359,7 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
                 lstcon_rpc_post(crpc);
         }
 
-        cfs_mutex_unlock(&console_session.ses_mutex);
+       mutex_unlock(&console_session.ses_mutex);
 
         cfs_waitq_wait_event_interruptible_timeout(trans->tas_waitq,
                                               lstcon_rpc_trans_check(trans),
@@ -368,7 +367,7 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
 
         rc = (rc > 0)? 0: ((rc < 0)? -EINTR: -ETIMEDOUT);
 
-        cfs_mutex_lock(&console_session.ses_mutex);
+       mutex_lock(&console_session.ses_mutex);
 
         if (console_session.ses_shutdown)
                 rc = -ESHUTDOWN;
@@ -560,11 +559,11 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
                                            lstcon_rpc_t, crp_link) {
                 rpc = crpc->crp_rpc;
 
-                cfs_spin_lock(&rpc->crpc_lock);
+               spin_lock(&rpc->crpc_lock);
 
-                /* free it if not posted or finished already */
-                if (!crpc->crp_posted || crpc->crp_finished) {
-                        cfs_spin_unlock(&rpc->crpc_lock);
+               /* free it if not posted or finished already */
+               if (!crpc->crp_posted || crpc->crp_finished) {
+                       spin_unlock(&rpc->crpc_lock);
 
                         cfs_list_del_init(&crpc->crp_link);
                         lstcon_rpc_put(crpc);
@@ -584,7 +583,7 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
                 cfs_list_del_init(&crpc->crp_link);
                 count ++;
 
-                cfs_spin_unlock(&rpc->crpc_lock);
+               spin_unlock(&rpc->crpc_lock);
 
                 cfs_atomic_dec(&trans->tas_remaining);
         }
@@ -1190,10 +1189,10 @@ lstcon_rpc_pinger(void *arg)
         /* RPC pinger is a special case of transaction,
          * it's called by timer at 8 seconds interval.
          */
-        cfs_mutex_lock(&console_session.ses_mutex);
+       mutex_lock(&console_session.ses_mutex);
 
         if (console_session.ses_shutdown || console_session.ses_expired) {
-                cfs_mutex_unlock(&console_session.ses_mutex);
+               mutex_unlock(&console_session.ses_mutex);
                 return;
         }
 
@@ -1234,17 +1233,17 @@ lstcon_rpc_pinger(void *arg)
                         LASSERT (crpc->crp_trans == trans);
                         LASSERT (!cfs_list_empty(&crpc->crp_link));
 
-                        cfs_spin_lock(&crpc->crp_rpc->crpc_lock);
+                       spin_lock(&crpc->crp_rpc->crpc_lock);
 
-                        LASSERT (crpc->crp_posted);
+                       LASSERT(crpc->crp_posted);
 
-                        if (!crpc->crp_finished) {
-                                /* in flight */
-                                cfs_spin_unlock(&crpc->crp_rpc->crpc_lock);
-                                continue;
-                        }
+                       if (!crpc->crp_finished) {
+                               /* in flight */
+                               spin_unlock(&crpc->crp_rpc->crpc_lock);
+                               continue;
+                       }
 
-                        cfs_spin_unlock(&crpc->crp_rpc->crpc_lock);
+                       spin_unlock(&crpc->crp_rpc->crpc_lock);
 
                         lstcon_rpc_get_reply(crpc, &rep);
 
@@ -1280,7 +1279,7 @@ lstcon_rpc_pinger(void *arg)
         }
 
         if (console_session.ses_expired) {
-                cfs_mutex_unlock(&console_session.ses_mutex);
+               mutex_unlock(&console_session.ses_mutex);
                 return;
         }
 
@@ -1289,7 +1288,7 @@ lstcon_rpc_pinger(void *arg)
         ptimer->stt_expires = (cfs_time_t)(cfs_time_current_sec() + LST_PING_INTERVAL);
         stt_add_timer(ptimer);
 
-        cfs_mutex_unlock(&console_session.ses_mutex);
+       mutex_unlock(&console_session.ses_mutex);
 }
 
 int
@@ -1355,16 +1354,16 @@ lstcon_rpc_cleanup_wait(void)
                         cfs_waitq_signal(&trans->tas_waitq);
                 }
 
-                cfs_mutex_unlock(&console_session.ses_mutex);
+               mutex_unlock(&console_session.ses_mutex);
 
-                CWARN("Session is shutting down, "
-                      "waiting for termination of transactions\n");
-                cfs_pause(cfs_time_seconds(1));
+               CWARN("Session is shutting down, "
+                     "waiting for termination of transactions\n");
+               cfs_pause(cfs_time_seconds(1));
 
-                cfs_mutex_lock(&console_session.ses_mutex);
+               mutex_lock(&console_session.ses_mutex);
         }
 
-        cfs_spin_lock(&console_session.ses_rpc_lock);
+       spin_lock(&console_session.ses_rpc_lock);
 
         lst_wait_until((cfs_atomic_read(&console_session.ses_rpc_counter) == 0),
                        console_session.ses_rpc_lock,
@@ -1375,7 +1374,7 @@ lstcon_rpc_cleanup_wait(void)
         cfs_list_add(&zlist, &console_session.ses_rpc_freelist);
         cfs_list_del_init(&console_session.ses_rpc_freelist);
 
-        cfs_spin_unlock(&console_session.ses_rpc_lock);
+       spin_unlock(&console_session.ses_rpc_lock);
 
         while (!cfs_list_empty(&zlist)) {
                 crpc = cfs_list_entry(zlist.next, lstcon_rpc_t, crp_link);
@@ -1394,11 +1393,11 @@ lstcon_rpc_module_init(void)
 
         console_session.ses_ping = NULL;
 
-        cfs_spin_lock_init(&console_session.ses_rpc_lock);
-        cfs_atomic_set(&console_session.ses_rpc_counter, 0);
-        CFS_INIT_LIST_HEAD(&console_session.ses_rpc_freelist);
+       spin_lock_init(&console_session.ses_rpc_lock);
+       cfs_atomic_set(&console_session.ses_rpc_counter, 0);
+       CFS_INIT_LIST_HEAD(&console_session.ses_rpc_freelist);
 
-        return 0;
+       return 0;
 }
 
 void
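
lstcon_rpc_prep() above pops an idle RPC from ses_rpc_freelist under ses_rpc_lock and only falls back to allocation after dropping the lock, since allocation can sleep. A self-contained sketch of the freelist-with-fallback pattern using the standard list helpers (demo_* names are hypothetical):

    #include <linux/spinlock.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    struct demo_obj {
            struct list_head do_link;
    };

    static DEFINE_SPINLOCK(demo_free_lock);
    static LIST_HEAD(demo_freelist);

    static struct demo_obj *demo_obj_get(void)
    {
            struct demo_obj *obj = NULL;

            spin_lock(&demo_free_lock);
            if (!list_empty(&demo_freelist)) {
                    obj = list_first_entry(&demo_freelist,
                                           struct demo_obj, do_link);
                    list_del_init(&obj->do_link);
            }
            spin_unlock(&demo_free_lock);

            /* allocate only after dropping the lock:
             * GFP_KERNEL may sleep, which is illegal under a spinlock */
            if (obj == NULL)
                    obj = kmalloc(sizeof(*obj), GFP_KERNEL);
            return obj;
    }
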
index 2f4b543..74f77af 100644
@@ -1855,7 +1855,7 @@ lstcon_session_feats_check(unsigned feats)
                return -EPROTO;
        }
 
-       cfs_spin_lock(&console_session.ses_rpc_lock);
+       spin_lock(&console_session.ses_rpc_lock);
 
        if (!console_session.ses_feats_updated) {
                console_session.ses_feats_updated = 1;
@@ -1865,7 +1865,7 @@ lstcon_session_feats_check(unsigned feats)
        if (console_session.ses_features != feats)
                rc = -EPROTO;
 
-       cfs_spin_unlock(&console_session.ses_rpc_lock);
+       spin_unlock(&console_session.ses_rpc_lock);
 
        if (rc != 0) {
                CERROR("remote features %x do not match with "
@@ -1889,7 +1889,7 @@ lstcon_acceptor_handle (srpc_server_rpc_t *rpc)
 
         sfw_unpack_message(req);
 
-        cfs_mutex_lock(&console_session.ses_mutex);
+       mutex_lock(&console_session.ses_mutex);
 
         jrep->join_sid = console_session.ses_id;
 
@@ -1954,7 +1954,7 @@ out:
         if (grp != NULL)
                 lstcon_group_put(grp);
 
-        cfs_mutex_unlock(&console_session.ses_mutex);
+       mutex_unlock(&console_session.ses_mutex);
 
         return rc;
 }
@@ -1991,7 +1991,7 @@ lstcon_console_init(void)
        console_session.ses_features        = LST_FEATS_MASK;
        console_session.ses_laststamp       = cfs_time_current_sec();
 
-        cfs_mutex_init(&console_session.ses_mutex);
+       mutex_init(&console_session.ses_mutex);
 
         CFS_INIT_LIST_HEAD(&console_session.ses_ndl_list);
         CFS_INIT_LIST_HEAD(&console_session.ses_grp_list);
@@ -2051,7 +2051,7 @@ lstcon_console_fini(void)
 
         libcfs_deregister_ioctl(&lstcon_ioctl_handler);
 
-        cfs_mutex_lock(&console_session.ses_mutex);
+       mutex_lock(&console_session.ses_mutex);
 
         srpc_shutdown_service(&lstcon_acceptor_service);
         srpc_remove_service(&lstcon_acceptor_service);
@@ -2061,7 +2061,7 @@ lstcon_console_fini(void)
 
         lstcon_rpc_module_fini();
 
-        cfs_mutex_unlock(&console_session.ses_mutex);
+       mutex_unlock(&console_session.ses_mutex);
 
         LASSERT (cfs_list_empty(&console_session.ses_ndl_list));
         LASSERT (cfs_list_empty(&console_session.ses_grp_list));
index 6ea5ac5..bbfb502 100644
@@ -132,7 +132,7 @@ typedef struct lstcon_test {
 #define LST_CONSOLE_TIMEOUT     300             /* default console timeout */
 
 typedef struct {
-        cfs_mutex_t             ses_mutex;      /* lock for session, only one thread can enter session */
+       struct mutex            ses_mutex;      /* only 1 thread in session */
         lst_sid_t               ses_id;         /* global session id */
         int                     ses_key;        /* local session key */
         int                     ses_state;      /* state of session */
@@ -160,7 +160,7 @@ typedef struct {
         cfs_list_t              ses_ndl_list;   /* global list of nodes */
         cfs_list_t             *ses_ndl_hash;   /* hash table of nodes */
 
-        cfs_spinlock_t          ses_rpc_lock;   /* serialize */
+       spinlock_t              ses_rpc_lock;   /* serialize */
         cfs_atomic_t            ses_rpc_counter;/* # of initialized RPCs */
         cfs_list_t              ses_rpc_freelist; /* idle console rpc */
 } lstcon_session_t;                             /*** session descriptor */
index 7d3e5af..d4ec74d 100644
@@ -106,10 +106,10 @@ struct smoketest_framework {
         cfs_list_t         fw_zombie_sessions; /* stopping sessions */
         cfs_list_t         fw_tests;           /* registered test cases */
         cfs_atomic_t       fw_nzombies;        /* # zombie sessions */
-        cfs_spinlock_t     fw_lock;            /* serialise */
-        sfw_session_t     *fw_session;         /* _the_ session */
-        int                fw_shuttingdown;    /* shutdown in progress */
-        srpc_server_rpc_t *fw_active_srpc;     /* running RPC */
+       spinlock_t         fw_lock;             /* serialise */
+       sfw_session_t     *fw_session;          /* _the_ session */
+       int                fw_shuttingdown;     /* shutdown in progress */
+       srpc_server_rpc_t *fw_active_srpc;      /* running RPC */
 } sfw_data;
 
 /* forward ref's */
@@ -215,14 +215,14 @@ sfw_deactivate_session (void)
         cfs_atomic_inc(&sfw_data.fw_nzombies);
         cfs_list_add(&sn->sn_list, &sfw_data.fw_zombie_sessions);
 
-        cfs_spin_unlock(&sfw_data.fw_lock);
+       spin_unlock(&sfw_data.fw_lock);
 
-        cfs_list_for_each_entry_typed (tsc, &sfw_data.fw_tests,
-                                       sfw_test_case_t, tsc_list) {
-                srpc_abort_service(tsc->tsc_srv_service);
-        }
+       cfs_list_for_each_entry_typed(tsc, &sfw_data.fw_tests,
+                                     sfw_test_case_t, tsc_list) {
+               srpc_abort_service(tsc->tsc_srv_service);
+       }
 
-        cfs_spin_lock(&sfw_data.fw_lock);
+       spin_lock(&sfw_data.fw_lock);
 
         cfs_list_for_each_entry_typed (tsb, &sn->sn_batches,
                                        sfw_batch_t, bat_list) {
@@ -236,12 +236,11 @@ sfw_deactivate_session (void)
                 return;   /* wait for active batches to stop */
 
         cfs_list_del_init(&sn->sn_list);
-        cfs_spin_unlock(&sfw_data.fw_lock);
+       spin_unlock(&sfw_data.fw_lock);
 
-        sfw_destroy_session(sn);
+       sfw_destroy_session(sn);
 
-        cfs_spin_lock(&sfw_data.fw_lock);
-        return;
+       spin_lock(&sfw_data.fw_lock);
 }
 
 #ifndef __KERNEL__
@@ -257,9 +256,9 @@ sfw_session_removed (void)
 void
 sfw_session_expired (void *data)
 {
-        sfw_session_t *sn = data;
+       sfw_session_t *sn = data;
 
-        cfs_spin_lock(&sfw_data.fw_lock);
+       spin_lock(&sfw_data.fw_lock);
 
         LASSERT (sn->sn_timer_active);
         LASSERT (sn == sfw_data.fw_session);
@@ -271,8 +270,7 @@ sfw_session_expired (void *data)
         sn->sn_timer_active = 0;
         sfw_deactivate_session();
 
-        cfs_spin_unlock(&sfw_data.fw_lock);
-        return;
+       spin_unlock(&sfw_data.fw_lock);
 }
 
 static inline void
@@ -336,14 +334,13 @@ sfw_client_rpc_fini (srpc_client_rpc_t *rpc)
                 swi_state2str(rpc->crpc_wi.swi_state),
                 rpc->crpc_aborted, rpc->crpc_status);
 
-        cfs_spin_lock(&sfw_data.fw_lock);
+       spin_lock(&sfw_data.fw_lock);
 
-        /* my callers must finish all RPCs before shutting me down */
-        LASSERT (!sfw_data.fw_shuttingdown);
-        cfs_list_add(&rpc->crpc_list, &sfw_data.fw_zombie_rpcs);
+       /* my callers must finish all RPCs before shutting me down */
+       LASSERT(!sfw_data.fw_shuttingdown);
+       cfs_list_add(&rpc->crpc_list, &sfw_data.fw_zombie_rpcs);
 
-        cfs_spin_unlock(&sfw_data.fw_lock);
-        return;
+       spin_unlock(&sfw_data.fw_lock);
 }
 
 sfw_batch_t *
@@ -484,18 +481,18 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
        sfw_init_session(sn, request->mksn_sid,
                         msg->msg_ses_feats, &request->mksn_name[0]);
 
-        cfs_spin_lock(&sfw_data.fw_lock);
+       spin_lock(&sfw_data.fw_lock);
 
-        sfw_deactivate_session();
-        LASSERT (sfw_data.fw_session == NULL);
-        sfw_data.fw_session = sn;
+       sfw_deactivate_session();
+       LASSERT(sfw_data.fw_session == NULL);
+       sfw_data.fw_session = sn;
 
-        cfs_spin_unlock(&sfw_data.fw_lock);
+       spin_unlock(&sfw_data.fw_lock);
 
-        reply->mksn_status  = 0;
-        reply->mksn_sid     = sn->sn_id;
-        reply->mksn_timeout = sn->sn_timeout;
-        return 0;
+       reply->mksn_status  = 0;
+       reply->mksn_sid     = sn->sn_id;
+       reply->mksn_timeout = sn->sn_timeout;
+       return 0;
 }
 
 int
@@ -520,14 +517,14 @@ sfw_remove_session (srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply)
                 return 0;
         }
 
-        cfs_spin_lock(&sfw_data.fw_lock);
-        sfw_deactivate_session();
-        cfs_spin_unlock(&sfw_data.fw_lock);
+       spin_lock(&sfw_data.fw_lock);
+       sfw_deactivate_session();
+       spin_unlock(&sfw_data.fw_lock);
 
-        reply->rmsn_status = 0;
-        reply->rmsn_sid    = LST_INVALID_SID;
-        LASSERT (sfw_data.fw_session == NULL);
-        return 0;
+       reply->rmsn_status = 0;
+       reply->rmsn_sid    = LST_INVALID_SID;
+       LASSERT(sfw_data.fw_session == NULL);
+       return 0;
 }
 
 int
@@ -760,7 +757,7 @@ sfw_add_test_instance (sfw_batch_t *tsb, srpc_server_rpc_t *rpc)
         }
 
         memset(tsi, 0, sizeof(*tsi));
-        cfs_spin_lock_init(&tsi->tsi_lock);
+       spin_lock_init(&tsi->tsi_lock);
         cfs_atomic_set(&tsi->tsi_nactive, 0);
         CFS_INIT_LIST_HEAD(&tsi->tsi_units);
         CFS_INIT_LIST_HEAD(&tsi->tsi_free_rpcs);
@@ -856,17 +853,17 @@ sfw_test_unit_done (sfw_test_unit_t *tsu)
                 return;
 
         /* the test instance is done */
-        cfs_spin_lock(&tsi->tsi_lock);
+       spin_lock(&tsi->tsi_lock);
 
-        tsi->tsi_stopping = 0;
+       tsi->tsi_stopping = 0;
 
-        cfs_spin_unlock(&tsi->tsi_lock);
+       spin_unlock(&tsi->tsi_lock);
 
-        cfs_spin_lock(&sfw_data.fw_lock);
+       spin_lock(&sfw_data.fw_lock);
 
-        if (!cfs_atomic_dec_and_test(&tsb->bat_nactive) ||/* tsb still active */
-            sn == sfw_data.fw_session) {               /* sn also active */
-                cfs_spin_unlock(&sfw_data.fw_lock);
+       if (!cfs_atomic_dec_and_test(&tsb->bat_nactive) ||/* tsb still active */
+           sn == sfw_data.fw_session) {                  /* sn also active */
+               spin_unlock(&sfw_data.fw_lock);
                 return;
         }
 
@@ -875,16 +872,16 @@ sfw_test_unit_done (sfw_test_unit_t *tsu)
         cfs_list_for_each_entry_typed (tsb, &sn->sn_batches,
                                        sfw_batch_t, bat_list) {
                 if (sfw_batch_active(tsb)) {
-                        cfs_spin_unlock(&sfw_data.fw_lock);
-                        return;
-                }
-        }
+                       spin_unlock(&sfw_data.fw_lock);
+                       return;
+               }
+       }
 
-        cfs_list_del_init(&sn->sn_list);
-        cfs_spin_unlock(&sfw_data.fw_lock);
+       cfs_list_del_init(&sn->sn_list);
+       spin_unlock(&sfw_data.fw_lock);
 
-        sfw_destroy_session(sn);
-        return;
+       sfw_destroy_session(sn);
+       return;
 }
 
 void
@@ -896,7 +893,7 @@ sfw_test_rpc_done (srpc_client_rpc_t *rpc)
 
         tsi->tsi_ops->tso_done_rpc(tsu, rpc);
 
-        cfs_spin_lock(&tsi->tsi_lock);
+       spin_lock(&tsi->tsi_lock);
 
         LASSERT (sfw_test_active(tsi));
         LASSERT (!cfs_list_empty(&rpc->crpc_list));
@@ -912,7 +909,7 @@ sfw_test_rpc_done (srpc_client_rpc_t *rpc)
         /* dec ref for poster */
         srpc_client_rpc_decref(rpc);
 
-        cfs_spin_unlock(&tsi->tsi_lock);
+       spin_unlock(&tsi->tsi_lock);
 
         if (!done) {
                 swi_schedule_workitem(&tsu->tsu_worker);
@@ -928,10 +925,10 @@ sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer,
                    unsigned features, int nblk, int blklen,
                    srpc_client_rpc_t **rpcpp)
 {
-        srpc_client_rpc_t   *rpc = NULL;
-        sfw_test_instance_t *tsi = tsu->tsu_instance;
+       srpc_client_rpc_t   *rpc = NULL;
+       sfw_test_instance_t *tsi = tsu->tsu_instance;
 
-        cfs_spin_lock(&tsi->tsi_lock);
+       spin_lock(&tsi->tsi_lock);
 
         LASSERT (sfw_test_active(tsi));
 
@@ -943,7 +940,7 @@ sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer,
                 cfs_list_del_init(&rpc->crpc_list);
         }
 
-        cfs_spin_unlock(&tsi->tsi_lock);
+       spin_unlock(&tsi->tsi_lock);
 
        if (rpc == NULL) {
                rpc = srpc_create_client_rpc(peer, tsi->tsi_service, nblk,
@@ -982,26 +979,26 @@ sfw_run_test (swi_workitem_t *wi)
 
         LASSERT (rpc != NULL);
 
-        cfs_spin_lock(&tsi->tsi_lock);
+       spin_lock(&tsi->tsi_lock);
 
-        if (tsi->tsi_stopping) {
-                cfs_list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
-                cfs_spin_unlock(&tsi->tsi_lock);
-                goto test_done;
-        }
+       if (tsi->tsi_stopping) {
+               cfs_list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
+               spin_unlock(&tsi->tsi_lock);
+               goto test_done;
+       }
 
-        if (tsu->tsu_loop > 0)
-                tsu->tsu_loop--;
+       if (tsu->tsu_loop > 0)
+               tsu->tsu_loop--;
 
-        cfs_list_add_tail(&rpc->crpc_list, &tsi->tsi_active_rpcs);
-        cfs_spin_unlock(&tsi->tsi_lock);
+       cfs_list_add_tail(&rpc->crpc_list, &tsi->tsi_active_rpcs);
+       spin_unlock(&tsi->tsi_lock);
 
-        rpc->crpc_timeout = rpc_timeout;
+       rpc->crpc_timeout = rpc_timeout;
 
-        cfs_spin_lock(&rpc->crpc_lock);
-        srpc_post_rpc(rpc);
-        cfs_spin_unlock(&rpc->crpc_lock);
-        return 0;
+       spin_lock(&rpc->crpc_lock);
+       srpc_post_rpc(rpc);
+       spin_unlock(&rpc->crpc_lock);
+       return 0;
 
 test_done:
         /*
@@ -1067,35 +1064,35 @@ sfw_stop_batch (sfw_batch_t *tsb, int force)
 
         cfs_list_for_each_entry_typed (tsi, &tsb->bat_tests,
                                        sfw_test_instance_t, tsi_list) {
-                cfs_spin_lock(&tsi->tsi_lock);
+               spin_lock(&tsi->tsi_lock);
 
-                if (!tsi->tsi_is_client ||
-                    !sfw_test_active(tsi) || tsi->tsi_stopping) {
-                        cfs_spin_unlock(&tsi->tsi_lock);
-                        continue;
-                }
+               if (!tsi->tsi_is_client ||
+                   !sfw_test_active(tsi) || tsi->tsi_stopping) {
+                       spin_unlock(&tsi->tsi_lock);
+                       continue;
+               }
 
-                tsi->tsi_stopping = 1;
+               tsi->tsi_stopping = 1;
 
-                if (!force) {
-                        cfs_spin_unlock(&tsi->tsi_lock);
-                        continue;
-                }
+               if (!force) {
+                       spin_unlock(&tsi->tsi_lock);
+                       continue;
+               }
 
-                /* abort launched rpcs in the test */
-                cfs_list_for_each_entry_typed (rpc, &tsi->tsi_active_rpcs,
-                                               srpc_client_rpc_t, crpc_list) {
-                        cfs_spin_lock(&rpc->crpc_lock);
+               /* abort launched rpcs in the test */
+               cfs_list_for_each_entry_typed(rpc, &tsi->tsi_active_rpcs,
+                                             srpc_client_rpc_t, crpc_list) {
+                       spin_lock(&rpc->crpc_lock);
 
-                        srpc_abort_rpc(rpc, -EINTR);
+                       srpc_abort_rpc(rpc, -EINTR);
 
-                        cfs_spin_unlock(&rpc->crpc_lock);
-                }
+                       spin_unlock(&rpc->crpc_lock);
+               }
 
-                cfs_spin_unlock(&tsi->tsi_lock);
-        }
+               spin_unlock(&tsi->tsi_lock);
+       }
 
-        return 0;
+       return 0;
 }
 
 int
@@ -1264,29 +1261,29 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
        unsigned        features = LST_FEATS_MASK;
        int             rc = 0;
 
-        LASSERT (sfw_data.fw_active_srpc == NULL);
-        LASSERT (sv->sv_id <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
+       LASSERT(sfw_data.fw_active_srpc == NULL);
+       LASSERT(sv->sv_id <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
 
-        cfs_spin_lock(&sfw_data.fw_lock);
+       spin_lock(&sfw_data.fw_lock);
 
-        if (sfw_data.fw_shuttingdown) {
-                cfs_spin_unlock(&sfw_data.fw_lock);
-                return -ESHUTDOWN;
-        }
+       if (sfw_data.fw_shuttingdown) {
+               spin_unlock(&sfw_data.fw_lock);
+               return -ESHUTDOWN;
+       }
 
-        /* Remove timer to avoid racing with it or expiring active session */
-        if (sfw_del_session_timer() != 0) {
-                CERROR ("Dropping RPC (%s) from %s: racing with expiry timer.",
-                        sv->sv_name, libcfs_id2str(rpc->srpc_peer));
-                cfs_spin_unlock(&sfw_data.fw_lock);
-                return -EAGAIN;
-        }
+       /* Remove timer to avoid racing with it or expiring active session */
+       if (sfw_del_session_timer() != 0) {
+               CERROR("Dropping RPC (%s) from %s: racing with expiry timer.",
+                      sv->sv_name, libcfs_id2str(rpc->srpc_peer));
+               spin_unlock(&sfw_data.fw_lock);
+               return -EAGAIN;
+       }
 
-        sfw_data.fw_active_srpc = rpc;
-        cfs_spin_unlock(&sfw_data.fw_lock);
+       sfw_data.fw_active_srpc = rpc;
+       spin_unlock(&sfw_data.fw_lock);
 
-        sfw_unpack_message(request);
-        LASSERT (request->msg_type == srpc_service2request(sv->sv_id));
+       sfw_unpack_message(request);
+       LASSERT(request->msg_type == srpc_service2request(sv->sv_id));
 
        /* rpc module should have checked this */
        LASSERT(request->msg_version == SRPC_MSG_VERSION);
@@ -1350,20 +1347,20 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
                features = sfw_data.fw_session->sn_features;
  out:
        reply->msg_ses_feats = features;
-        rpc->srpc_done = sfw_server_rpc_done;
-        cfs_spin_lock(&sfw_data.fw_lock);
+       rpc->srpc_done = sfw_server_rpc_done;
+       spin_lock(&sfw_data.fw_lock);
 
 #ifdef __KERNEL__
-        if (!sfw_data.fw_shuttingdown)
-                sfw_add_session_timer();
+       if (!sfw_data.fw_shuttingdown)
+               sfw_add_session_timer();
 #else
-        LASSERT (!sfw_data.fw_shuttingdown);
-        sfw_add_session_timer();
+       LASSERT(!sfw_data.fw_shuttingdown);
+       sfw_add_session_timer();
 #endif
 
-        sfw_data.fw_active_srpc = NULL;
-        cfs_spin_unlock(&sfw_data.fw_lock);
-        return rc;
+       sfw_data.fw_active_srpc = NULL;
+       spin_unlock(&sfw_data.fw_lock);
+       return rc;
 }
 
 int
@@ -1372,51 +1369,51 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
        struct srpc_service     *sv = rpc->srpc_scd->scd_svc;
        int                     rc;
 
-        LASSERT (rpc->srpc_bulk != NULL);
-        LASSERT (sv->sv_id == SRPC_SERVICE_TEST);
-        LASSERT (sfw_data.fw_active_srpc == NULL);
-        LASSERT (rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst.tsr_is_client);
+       LASSERT(rpc->srpc_bulk != NULL);
+       LASSERT(sv->sv_id == SRPC_SERVICE_TEST);
+       LASSERT(sfw_data.fw_active_srpc == NULL);
+       LASSERT(rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst.tsr_is_client);
 
-        cfs_spin_lock(&sfw_data.fw_lock);
+       spin_lock(&sfw_data.fw_lock);
 
-        if (status != 0) {
-                CERROR ("Bulk transfer failed for RPC: "
-                        "service %s, peer %s, status %d\n",
-                        sv->sv_name, libcfs_id2str(rpc->srpc_peer), status);
-                cfs_spin_unlock(&sfw_data.fw_lock);
-                return -EIO;
-        }
+       if (status != 0) {
+               CERROR("Bulk transfer failed for RPC: "
+                      "service %s, peer %s, status %d\n",
+                      sv->sv_name, libcfs_id2str(rpc->srpc_peer), status);
+               spin_unlock(&sfw_data.fw_lock);
+               return -EIO;
+       }
 
-        if (sfw_data.fw_shuttingdown) {
-                cfs_spin_unlock(&sfw_data.fw_lock);
-                return -ESHUTDOWN;
-        }
+       if (sfw_data.fw_shuttingdown) {
+               spin_unlock(&sfw_data.fw_lock);
+               return -ESHUTDOWN;
+       }
 
-        if (sfw_del_session_timer() != 0) {
-                CERROR ("Dropping RPC (%s) from %s: racing with expiry timer",
-                        sv->sv_name, libcfs_id2str(rpc->srpc_peer));
-                cfs_spin_unlock(&sfw_data.fw_lock);
-                return -EAGAIN;
-        }
+       if (sfw_del_session_timer() != 0) {
+               CERROR("Dropping RPC (%s) from %s: racing with expiry timer",
+                      sv->sv_name, libcfs_id2str(rpc->srpc_peer));
+               spin_unlock(&sfw_data.fw_lock);
+               return -EAGAIN;
+       }
 
-        sfw_data.fw_active_srpc = rpc;
-        cfs_spin_unlock(&sfw_data.fw_lock);
+       sfw_data.fw_active_srpc = rpc;
+       spin_unlock(&sfw_data.fw_lock);
 
-        rc = sfw_add_test(rpc);
+       rc = sfw_add_test(rpc);
 
-        cfs_spin_lock(&sfw_data.fw_lock);
+       spin_lock(&sfw_data.fw_lock);
 
 #ifdef __KERNEL__
-        if (!sfw_data.fw_shuttingdown)
-                sfw_add_session_timer();
+       if (!sfw_data.fw_shuttingdown)
+               sfw_add_session_timer();
 #else
-        LASSERT (!sfw_data.fw_shuttingdown);
-        sfw_add_session_timer();
+       LASSERT(!sfw_data.fw_shuttingdown);
+       sfw_add_session_timer();
 #endif
 
-        sfw_data.fw_active_srpc = NULL;
-        cfs_spin_unlock(&sfw_data.fw_lock);
-        return rc;
+       sfw_data.fw_active_srpc = NULL;
+       spin_unlock(&sfw_data.fw_lock);
+       return rc;
 }
 
 srpc_client_rpc_t *
@@ -1426,7 +1423,7 @@ sfw_create_rpc(lnet_process_id_t peer, int service,
 {
        srpc_client_rpc_t *rpc = NULL;
 
-        cfs_spin_lock(&sfw_data.fw_lock);
+       spin_lock(&sfw_data.fw_lock);
 
         LASSERT (!sfw_data.fw_shuttingdown);
         LASSERT (service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
@@ -1440,7 +1437,7 @@ sfw_create_rpc(lnet_process_id_t peer, int service,
                                      done, sfw_client_rpc_fini, priv);
         }
 
-        cfs_spin_unlock(&sfw_data.fw_lock);
+       spin_unlock(&sfw_data.fw_lock);
 
        if (rpc == NULL) {
                rpc = srpc_create_client_rpc(peer, service,
@@ -1603,19 +1600,19 @@ sfw_unpack_message (srpc_msg_t *msg)
 void
 sfw_abort_rpc (srpc_client_rpc_t *rpc)
 {
-        LASSERT (cfs_atomic_read(&rpc->crpc_refcount) > 0);
-        LASSERT (rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
+       LASSERT(cfs_atomic_read(&rpc->crpc_refcount) > 0);
+       LASSERT(rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
 
-        cfs_spin_lock(&rpc->crpc_lock);
-        srpc_abort_rpc(rpc, -EINTR);
-        cfs_spin_unlock(&rpc->crpc_lock);
-        return;
+       spin_lock(&rpc->crpc_lock);
+       srpc_abort_rpc(rpc, -EINTR);
+       spin_unlock(&rpc->crpc_lock);
+       return;
 }
 
 void
 sfw_post_rpc (srpc_client_rpc_t *rpc)
 {
-        cfs_spin_lock(&rpc->crpc_lock);
+       spin_lock(&rpc->crpc_lock);
 
         LASSERT (!rpc->crpc_closed);
         LASSERT (!rpc->crpc_aborted);
@@ -1625,8 +1622,8 @@ sfw_post_rpc (srpc_client_rpc_t *rpc)
         rpc->crpc_timeout = rpc_timeout;
         srpc_post_rpc(rpc);
 
-        cfs_spin_unlock(&rpc->crpc_lock);
-        return;
+       spin_unlock(&rpc->crpc_lock);
+       return;
 }
 
 static srpc_service_t sfw_services[] = 
@@ -1722,7 +1719,7 @@ sfw_startup (void)
 
         sfw_data.fw_session     = NULL;
         sfw_data.fw_active_srpc = NULL;
-        cfs_spin_lock_init(&sfw_data.fw_lock);
+       spin_lock_init(&sfw_data.fw_lock);
         cfs_atomic_set(&sfw_data.fw_nzombies, 0);
         CFS_INIT_LIST_HEAD(&sfw_data.fw_tests);
         CFS_INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
@@ -1790,11 +1787,11 @@ sfw_startup (void)
 void
 sfw_shutdown (void)
 {
-        srpc_service_t  *sv;
-        sfw_test_case_t *tsc;
-        int              i;
+       srpc_service_t  *sv;
+       sfw_test_case_t *tsc;
+       int              i;
 
-        cfs_spin_lock(&sfw_data.fw_lock);
+       spin_lock(&sfw_data.fw_lock);
 
         sfw_data.fw_shuttingdown = 1;
 #ifdef __KERNEL__
@@ -1814,7 +1811,7 @@ sfw_shutdown (void)
                        "waiting for %d zombie sessions to die.\n",
                        cfs_atomic_read(&sfw_data.fw_nzombies));
 
-        cfs_spin_unlock(&sfw_data.fw_lock);
+       spin_unlock(&sfw_data.fw_lock);
 
         for (i = 0; ; i++) {
                 sv = &sfw_services[i];
index 8a1ddd3..72b161e 100644
@@ -46,8 +46,8 @@ int ping_srv_workitems = SFW_TEST_WI_MAX;
 CFS_MODULE_PARM(ping_srv_workitems, "i", int, 0644, "# PING server workitems");
 
 typedef struct {
-        cfs_spinlock_t  pnd_lock;       /* serialize */
-        int             pnd_counter;    /* sequence counter */
+       spinlock_t      pnd_lock;       /* serialize */
+       int             pnd_counter;    /* sequence counter */
 } lst_ping_data_t;
 
 static lst_ping_data_t  lst_ping_data;
@@ -60,10 +60,10 @@ ping_client_init(sfw_test_instance_t *tsi)
        LASSERT(tsi->tsi_is_client);
        LASSERT(sn != NULL && (sn->sn_features & ~LST_FEATS_MASK) == 0);
 
-        cfs_spin_lock_init(&lst_ping_data.pnd_lock);
-        lst_ping_data.pnd_counter = 0;
+       spin_lock_init(&lst_ping_data.pnd_lock);
+       lst_ping_data.pnd_counter = 0;
 
-        return 0;
+       return 0;
 }
 
 static void
@@ -103,15 +103,15 @@ ping_client_prep_rpc(sfw_test_unit_t *tsu,
 
         req->pnr_magic = LST_PING_TEST_MAGIC;
 
-        cfs_spin_lock(&lst_ping_data.pnd_lock);
-        req->pnr_seq = lst_ping_data.pnd_counter ++;
-        cfs_spin_unlock(&lst_ping_data.pnd_lock);
+       spin_lock(&lst_ping_data.pnd_lock);
+       req->pnr_seq = lst_ping_data.pnd_counter++;
+       spin_unlock(&lst_ping_data.pnd_lock);
 
-        cfs_fs_timeval(&tv);
-        req->pnr_time_sec  = tv.tv_sec;
-        req->pnr_time_usec = tv.tv_usec;
+       cfs_fs_timeval(&tv);
+       req->pnr_time_sec  = tv.tv_sec;
+       req->pnr_time_usec = tv.tv_usec;
 
-        return rc;
+       return rc;
 }
 
 static void
index ab7cee6..3f03e5f 100644
@@ -53,12 +53,12 @@ typedef enum {
 } srpc_state_t;
 
 struct smoketest_rpc {
-        cfs_spinlock_t    rpc_glock;     /* global lock */
-        srpc_service_t   *rpc_services[SRPC_SERVICE_MAX_ID + 1];
-        lnet_handle_eq_t  rpc_lnet_eq;   /* _the_ LNet event queue */
-        srpc_state_t      rpc_state;
-        srpc_counters_t   rpc_counters;
-        __u64             rpc_matchbits; /* matchbits counter */
+       spinlock_t       rpc_glock;     /* global lock */
+       srpc_service_t  *rpc_services[SRPC_SERVICE_MAX_ID + 1];
+       lnet_handle_eq_t rpc_lnet_eq;   /* _the_ LNet event queue */
+       srpc_state_t     rpc_state;
+       srpc_counters_t  rpc_counters;
+       __u64            rpc_matchbits; /* matchbits counter */
 } srpc_data;
 
 static inline int
@@ -73,16 +73,16 @@ int srpc_handle_rpc (swi_workitem_t *wi);
 
 void srpc_get_counters (srpc_counters_t *cnt)
 {
-        cfs_spin_lock(&srpc_data.rpc_glock);
-        *cnt = srpc_data.rpc_counters;
-        cfs_spin_unlock(&srpc_data.rpc_glock);
+       spin_lock(&srpc_data.rpc_glock);
+       *cnt = srpc_data.rpc_counters;
+       spin_unlock(&srpc_data.rpc_glock);
 }
 
 void srpc_set_counters (const srpc_counters_t *cnt)
 {
-        cfs_spin_lock(&srpc_data.rpc_glock);
-        srpc_data.rpc_counters = *cnt;
-        cfs_spin_unlock(&srpc_data.rpc_glock);
+       spin_lock(&srpc_data.rpc_glock);
+       srpc_data.rpc_counters = *cnt;
+       spin_unlock(&srpc_data.rpc_glock);
 }
 
 int
@@ -192,12 +192,12 @@ srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
 static inline __u64
 srpc_next_id (void)
 {
-        __u64 id;
+       __u64 id;
 
-        cfs_spin_lock(&srpc_data.rpc_glock);
-        id = srpc_data.rpc_matchbits++;
-        cfs_spin_unlock(&srpc_data.rpc_glock);
-        return id;
+       spin_lock(&srpc_data.rpc_glock);
+       id = srpc_data.rpc_matchbits++;
+       spin_unlock(&srpc_data.rpc_glock);
+       return id;
 }
 
 void
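
rpc_glock serializes both the counters snapshot and the matchbits counter above; holding a spinlock around the 64-bit increment keeps the IDs unique and untorn even on 32-bit machines. A minimal sketch (demo_* names and the starting value are hypothetical):

    #include <linux/spinlock.h>
    #include <linux/types.h>

    static DEFINE_SPINLOCK(demo_glock);
    static u64 demo_matchbits = 1;

    static u64 demo_next_id(void)
    {
            u64 id;

            spin_lock(&demo_glock);
            id = demo_matchbits++;  /* serialized: no torn 64-bit update */
            spin_unlock(&demo_glock);
            return id;
    }
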
@@ -298,7 +298,7 @@ srpc_service_init(struct srpc_service *svc)
        cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
                scd->scd_cpt = i;
                scd->scd_svc = svc;
-               cfs_spin_lock_init(&scd->scd_lock);
+               spin_lock_init(&scd->scd_lock);
                CFS_INIT_LIST_HEAD(&scd->scd_rpc_free);
                CFS_INIT_LIST_HEAD(&scd->scd_rpc_active);
                CFS_INIT_LIST_HEAD(&scd->scd_buf_posted);
@@ -344,17 +344,17 @@ srpc_add_service(struct srpc_service *sv)
        if (srpc_service_init(sv) != 0)
                return -ENOMEM;
 
-       cfs_spin_lock(&srpc_data.rpc_glock);
+       spin_lock(&srpc_data.rpc_glock);
 
        LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);
 
        if (srpc_data.rpc_services[id] != NULL) {
-               cfs_spin_unlock(&srpc_data.rpc_glock);
+               spin_unlock(&srpc_data.rpc_glock);
                goto failed;
        }
 
        srpc_data.rpc_services[id] = sv;
-       cfs_spin_unlock(&srpc_data.rpc_glock);
+       spin_unlock(&srpc_data.rpc_glock);
 
        CDEBUG(D_NET, "Adding service: id %d, name %s\n", id, sv->sv_name);
        return 0;
@@ -367,18 +367,18 @@ srpc_add_service(struct srpc_service *sv)
 int
 srpc_remove_service (srpc_service_t *sv)
 {
-        int id = sv->sv_id;
+       int id = sv->sv_id;
 
-        cfs_spin_lock(&srpc_data.rpc_glock);
+       spin_lock(&srpc_data.rpc_glock);
 
-        if (srpc_data.rpc_services[id] != sv) {
-                cfs_spin_unlock(&srpc_data.rpc_glock);
-                return -ENOENT;
-        }
+       if (srpc_data.rpc_services[id] != sv) {
+               spin_unlock(&srpc_data.rpc_glock);
+               return -ENOENT;
+       }
 
-        srpc_data.rpc_services[id] = NULL;
-        cfs_spin_unlock(&srpc_data.rpc_glock);
-        return 0;
+       srpc_data.rpc_services[id] = NULL;
+       spin_unlock(&srpc_data.rpc_glock);
+       return 0;
 }
 
 int
@@ -506,7 +506,7 @@ srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
        LNetInvalidateHandle(&buf->buf_mdh);
        cfs_list_add(&buf->buf_list, &scd->scd_buf_posted);
        scd->scd_buf_nposted++;
-       cfs_spin_unlock(&scd->scd_lock);
+       spin_unlock(&scd->scd_lock);
 
        rc = srpc_post_passive_rqtbuf(sv->sv_id,
                                      !srpc_serv_is_framework(sv),
@@ -517,17 +517,17 @@ srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
         * msg and its event handler has been called. So we must add
         * buf to scd_buf_posted _before_ dropping scd_lock */
 
-       cfs_spin_lock(&scd->scd_lock);
+       spin_lock(&scd->scd_lock);
 
        if (rc == 0) {
                if (!sv->sv_shuttingdown)
                        return 0;
 
-               cfs_spin_unlock(&scd->scd_lock);
+               spin_unlock(&scd->scd_lock);
                /* srpc_shutdown_service might have tried to unlink me
                 * when my buf_mdh was still invalid */
                LNetMDUnlink(buf->buf_mdh);
-               cfs_spin_lock(&scd->scd_lock);
+               spin_lock(&scd->scd_lock);
                return 0;
        }
 
@@ -536,11 +536,11 @@ srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
                return rc; /* don't allow to change scd_buf_posted */
 
        cfs_list_del(&buf->buf_list);
-       cfs_spin_unlock(&scd->scd_lock);
+       spin_unlock(&scd->scd_lock);
 
        LIBCFS_FREE(buf, sizeof(*buf));
 
-       cfs_spin_lock(&scd->scd_lock);
+       spin_lock(&scd->scd_lock);
        return rc;
 }
 
@@ -554,30 +554,30 @@ srpc_add_buffer(struct swi_workitem *wi)
        /* it's called by workitem scheduler threads, these threads
         * should have been set CPT affinity, so buffers will be posted
         * on CPT local list of Portal */
-       cfs_spin_lock(&scd->scd_lock);
+       spin_lock(&scd->scd_lock);
 
        while (scd->scd_buf_adjust > 0 &&
               !scd->scd_svc->sv_shuttingdown) {
                scd->scd_buf_adjust--; /* consume it */
                scd->scd_buf_posting++;
 
-               cfs_spin_unlock(&scd->scd_lock);
+               spin_unlock(&scd->scd_lock);
 
                LIBCFS_ALLOC(buf, sizeof(*buf));
                if (buf == NULL) {
                        CERROR("Failed to add new buf to service: %s\n",
                               scd->scd_svc->sv_name);
-                       cfs_spin_lock(&scd->scd_lock);
+                       spin_lock(&scd->scd_lock);
                        rc = -ENOMEM;
                        break;
                }
 
-               cfs_spin_lock(&scd->scd_lock);
+               spin_lock(&scd->scd_lock);
                if (scd->scd_svc->sv_shuttingdown) {
-                       cfs_spin_unlock(&scd->scd_lock);
+                       spin_unlock(&scd->scd_lock);
                        LIBCFS_FREE(buf, sizeof(*buf));
 
-                       cfs_spin_lock(&scd->scd_lock);
+                       spin_lock(&scd->scd_lock);
                        rc = -ESHUTDOWN;
                        break;
                }
@@ -600,7 +600,7 @@ srpc_add_buffer(struct swi_workitem *wi)
                scd->scd_buf_posting--;
        }
 
-       cfs_spin_unlock(&scd->scd_lock);
+       spin_unlock(&scd->scd_lock);
        return 0;
 }
 
@@ -614,7 +614,7 @@ srpc_service_add_buffers(struct srpc_service *sv, int nbuffer)
        LASSERTF(nbuffer > 0, "nbuffer must be positive: %d\n", nbuffer);
 
        cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
-               cfs_spin_lock(&scd->scd_lock);
+               spin_lock(&scd->scd_lock);
 
                scd->scd_buf_err = 0;
                scd->scd_buf_err_stamp = 0;
@@ -622,7 +622,7 @@ srpc_service_add_buffers(struct srpc_service *sv, int nbuffer)
                scd->scd_buf_adjust = nbuffer;
                /* start to post buffers */
                swi_schedule_workitem(&scd->scd_buf_wi);
-               cfs_spin_unlock(&scd->scd_lock);
+               spin_unlock(&scd->scd_lock);
 
                /* framework service only post buffer for one partition  */
                if (srpc_serv_is_framework(sv))
@@ -630,7 +630,7 @@ srpc_service_add_buffers(struct srpc_service *sv, int nbuffer)
        }
 
        cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
-               cfs_spin_lock(&scd->scd_lock);
+               spin_lock(&scd->scd_lock);
                /*
                 * NB: srpc_service_add_buffers() can be called inside
                 * thread context of lst_sched_serial, and we don't normally
@@ -652,7 +652,7 @@ srpc_service_add_buffers(struct srpc_service *sv, int nbuffer)
                if (scd->scd_buf_err != 0 && rc == 0)
                        rc = scd->scd_buf_err;
 
-               cfs_spin_unlock(&scd->scd_lock);
+               spin_unlock(&scd->scd_lock);
        }
 
        return rc;
@@ -668,12 +668,12 @@ srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer)
        LASSERT(!sv->sv_shuttingdown);
 
        cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
-               cfs_spin_lock(&scd->scd_lock);
+               spin_lock(&scd->scd_lock);
 
                num = scd->scd_buf_total + scd->scd_buf_posting;
                scd->scd_buf_adjust -= min(nbuffer, num);
 
-               cfs_spin_unlock(&scd->scd_lock);
+               spin_unlock(&scd->scd_lock);
        }
 }
 
@@ -688,19 +688,19 @@ srpc_finish_service(struct srpc_service *sv)
        LASSERT(sv->sv_shuttingdown); /* srpc_shutdown_service called */
 
        cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
-               cfs_spin_lock(&scd->scd_lock);
+               spin_lock(&scd->scd_lock);
                if (!swi_deschedule_workitem(&scd->scd_buf_wi))
                        return 0;
 
                if (scd->scd_buf_nposted > 0) {
                        CDEBUG(D_NET, "waiting for %d posted buffers to unlink\n",
                               scd->scd_buf_nposted);
-                       cfs_spin_unlock(&scd->scd_lock);
+                       spin_unlock(&scd->scd_lock);
                        return 0;
                }
 
                if (cfs_list_empty(&scd->scd_rpc_active)) {
-                       cfs_spin_unlock(&scd->scd_lock);
+                       spin_unlock(&scd->scd_lock);
                        continue;
                }
 
@@ -715,7 +715,7 @@ srpc_finish_service(struct srpc_service *sv)
                        rpc->srpc_wi.swi_workitem.wi_running,
                        rpc->srpc_ev.ev_fired, rpc->srpc_ev.ev_type,
                        rpc->srpc_ev.ev_status, rpc->srpc_ev.ev_lnet);
-               cfs_spin_unlock(&scd->scd_lock);
+               spin_unlock(&scd->scd_lock);
                return 0;
        }
 
@@ -750,9 +750,9 @@ srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
                }
        }
 
-       cfs_spin_unlock(&scd->scd_lock);
+       spin_unlock(&scd->scd_lock);
        LIBCFS_FREE(buf, sizeof(*buf));
-       cfs_spin_lock(&scd->scd_lock);
+       spin_lock(&scd->scd_lock);
 }
 
 void
@@ -766,7 +766,7 @@ srpc_abort_service(struct srpc_service *sv)
               sv->sv_id, sv->sv_name);
 
        cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
-               cfs_spin_lock(&scd->scd_lock);
+               spin_lock(&scd->scd_lock);
 
                /* schedule in-flight RPCs to notice the abort, NB:
                 * racing with incoming RPCs; complete fix should make test
@@ -776,7 +776,7 @@ srpc_abort_service(struct srpc_service *sv)
                        swi_schedule_workitem(&rpc->srpc_wi);
                }
 
-               cfs_spin_unlock(&scd->scd_lock);
+               spin_unlock(&scd->scd_lock);
        }
 }
 
@@ -792,21 +792,21 @@ srpc_shutdown_service(srpc_service_t *sv)
               sv->sv_id, sv->sv_name);
 
        cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
-               cfs_spin_lock(&scd->scd_lock);
+               spin_lock(&scd->scd_lock);
 
        sv->sv_shuttingdown = 1; /* i.e. no new active RPC */
 
        cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
-               cfs_spin_unlock(&scd->scd_lock);
+               spin_unlock(&scd->scd_lock);
 
        cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
-               cfs_spin_lock(&scd->scd_lock);
+               spin_lock(&scd->scd_lock);
 
                /* schedule in-flight RPCs to notice the shutdown */
                cfs_list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list)
                        swi_schedule_workitem(&rpc->srpc_wi);
 
-               cfs_spin_unlock(&scd->scd_lock);
+               spin_unlock(&scd->scd_lock);
 
                /* OK to traverse scd_buf_posted without lock, since no one
                 * touches scd_buf_posted now */
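
Taking every per-partition scd_lock before flipping sv_shuttingdown, then releasing them all, acts as a barrier: any thread that subsequently acquires its partition's lock is guaranteed to observe the flag, without needing a dedicated global lock. Reduced to the two sides:

    /* shutdown side: publish the flag under all partition locks */
    cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
            spin_lock(&scd->scd_lock);
    sv->sv_shuttingdown = 1;
    cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
            spin_unlock(&scd->scd_lock);

    /* any RPC path: checks the flag under its own partition lock */
    spin_lock(&scd->scd_lock);
    if (sv->sv_shuttingdown) {
            spin_unlock(&scd->scd_lock);
            return -ESHUTDOWN;      /* hypothetical error handling */
    }
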
@@ -944,16 +944,16 @@ srpc_server_rpc_done(srpc_server_rpc_t *rpc, int status)
                 swi_state2str(rpc->srpc_wi.swi_state), status);
 
         if (status != 0) {
-                cfs_spin_lock(&srpc_data.rpc_glock);
-                srpc_data.rpc_counters.rpcs_dropped++;
-                cfs_spin_unlock(&srpc_data.rpc_glock);
-        }
+               spin_lock(&srpc_data.rpc_glock);
+               srpc_data.rpc_counters.rpcs_dropped++;
+               spin_unlock(&srpc_data.rpc_glock);
+       }
 
-        if (rpc->srpc_done != NULL)
-                (*rpc->srpc_done) (rpc);
-        LASSERT (rpc->srpc_bulk == NULL);
+       if (rpc->srpc_done != NULL)
+               (*rpc->srpc_done) (rpc);
+       LASSERT(rpc->srpc_bulk == NULL);
 
-       cfs_spin_lock(&scd->scd_lock);
+       spin_lock(&scd->scd_lock);
 
        if (rpc->srpc_reqstbuf != NULL) {
                /* NB might drop sv_lock in srpc_service_recycle_buffer, but
@@ -985,7 +985,7 @@ srpc_server_rpc_done(srpc_server_rpc_t *rpc, int status)
                cfs_list_add(&rpc->srpc_list, &scd->scd_rpc_free);
        }
 
-       cfs_spin_unlock(&scd->scd_lock);
+       spin_unlock(&scd->scd_lock);
        return;
 }
 
@@ -1001,10 +1001,10 @@ srpc_handle_rpc(swi_workitem_t *wi)
 
        LASSERT(wi == &rpc->srpc_wi);
 
-       cfs_spin_lock(&scd->scd_lock);
+       spin_lock(&scd->scd_lock);
 
        if (sv->sv_shuttingdown || rpc->srpc_aborted) {
-               cfs_spin_unlock(&scd->scd_lock);
+               spin_unlock(&scd->scd_lock);
 
                 if (rpc->srpc_bulk != NULL)
                         LNetMDUnlink(rpc->srpc_bulk->bk_mdh);
@@ -1017,7 +1017,7 @@ srpc_handle_rpc(swi_workitem_t *wi)
                 return 0;
         }
 
-       cfs_spin_unlock(&scd->scd_lock);
+       spin_unlock(&scd->scd_lock);
 
         switch (wi->swi_state) {
         default:
@@ -1111,17 +1111,16 @@ srpc_client_rpc_expired (void *data)
                rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
                rpc->crpc_timeout);
 
-        cfs_spin_lock(&rpc->crpc_lock);
+       spin_lock(&rpc->crpc_lock);
 
-        rpc->crpc_timeout = 0;
-        srpc_abort_rpc(rpc, -ETIMEDOUT);
+       rpc->crpc_timeout = 0;
+       srpc_abort_rpc(rpc, -ETIMEDOUT);
 
-        cfs_spin_unlock(&rpc->crpc_lock);
+       spin_unlock(&rpc->crpc_lock);
 
-        cfs_spin_lock(&srpc_data.rpc_glock);
-        srpc_data.rpc_counters.rpcs_expired++;
-        cfs_spin_unlock(&srpc_data.rpc_glock);
-        return;
+       spin_lock(&srpc_data.rpc_glock);
+       srpc_data.rpc_counters.rpcs_expired++;
+       spin_unlock(&srpc_data.rpc_glock);
 }
 
 inline void
@@ -1148,35 +1147,36 @@ srpc_add_client_rpc_timer (srpc_client_rpc_t *rpc)
 void
 srpc_del_client_rpc_timer (srpc_client_rpc_t *rpc)
 {
-        /* timer not planted or already exploded */
-        if (rpc->crpc_timeout == 0) return;
+       /* timer not planted or already exploded */
+       if (rpc->crpc_timeout == 0)
+               return;
 
-        /* timer sucessfully defused */
-        if (stt_del_timer(&rpc->crpc_timer)) return;
+       /* timer successfully defused */
+       if (stt_del_timer(&rpc->crpc_timer))
+               return;
 
 #ifdef __KERNEL__
-        /* timer detonated, wait for it to explode */
-        while (rpc->crpc_timeout != 0) {
-                cfs_spin_unlock(&rpc->crpc_lock);
+       /* timer detonated, wait for it to explode */
+       while (rpc->crpc_timeout != 0) {
+               spin_unlock(&rpc->crpc_lock);
 
-                cfs_schedule();
+               cfs_schedule();
 
-                cfs_spin_lock(&rpc->crpc_lock);
-        }
+               spin_lock(&rpc->crpc_lock);
+       }
 #else
-        LBUG(); /* impossible in single-threaded runtime */
+       LBUG(); /* impossible in single-threaded runtime */
 #endif
-        return;
 }
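
srpc_del_client_rpc_timer() above and srpc_client_rpc_expired() earlier in this hunk hand off through crpc_timeout under crpc_lock: the expiry handler zeroes the field, and the deleter spins on it, dropping the lock each iteration so the handler can make progress. The handshake, reduced to its two halves:

    /* expiry handler (timer thread) */
    spin_lock(&rpc->crpc_lock);
    rpc->crpc_timeout = 0;                  /* signals "I have fired" */
    srpc_abort_rpc(rpc, -ETIMEDOUT);
    spin_unlock(&rpc->crpc_lock);

    /* deleter, entered with crpc_lock held, after stt_del_timer() failed */
    while (rpc->crpc_timeout != 0) {
            spin_unlock(&rpc->crpc_lock);
            cfs_schedule();                 /* yield so the handler can finish */
            spin_lock(&rpc->crpc_lock);
    }
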
 
 void
 srpc_client_rpc_done (srpc_client_rpc_t *rpc, int status)
 {
-        swi_workitem_t *wi = &rpc->crpc_wi;
+       swi_workitem_t *wi = &rpc->crpc_wi;
 
-        LASSERT (status != 0 || wi->swi_state == SWI_STATE_DONE);
+       LASSERT(status != 0 || wi->swi_state == SWI_STATE_DONE);
 
-        cfs_spin_lock(&rpc->crpc_lock);
+       spin_lock(&rpc->crpc_lock);
 
         rpc->crpc_closed = 1;
         if (rpc->crpc_status == 0)
@@ -1200,7 +1200,7 @@ srpc_client_rpc_done (srpc_client_rpc_t *rpc, int status)
         LASSERT (!srpc_event_pending(rpc));
        swi_exit_workitem(wi);
 
-       cfs_spin_unlock(&rpc->crpc_lock);
+       spin_unlock(&rpc->crpc_lock);
 
        (*rpc->crpc_done)(rpc);
        return;
@@ -1218,14 +1218,14 @@ srpc_send_rpc (swi_workitem_t *wi)
         LASSERT (rpc != NULL);
         LASSERT (wi == &rpc->crpc_wi);
 
-        cfs_spin_lock(&rpc->crpc_lock);
+       spin_lock(&rpc->crpc_lock);
 
-        if (rpc->crpc_aborted) {
-                cfs_spin_unlock(&rpc->crpc_lock);
-                goto abort;
-        }
+       if (rpc->crpc_aborted) {
+               spin_unlock(&rpc->crpc_lock);
+               goto abort;
+       }
 
-        cfs_spin_unlock(&rpc->crpc_lock);
+       spin_unlock(&rpc->crpc_lock);
 
         switch (wi->swi_state) {
         default:
@@ -1306,11 +1306,11 @@ srpc_send_rpc (swi_workitem_t *wi)
                 return 1;
         }
 
-        if (rc != 0) {
-                cfs_spin_lock(&rpc->crpc_lock);
-                srpc_abort_rpc(rpc, rc);
-                cfs_spin_unlock(&rpc->crpc_lock);
-        }
+       if (rc != 0) {
+               spin_lock(&rpc->crpc_lock);
+               srpc_abort_rpc(rpc, rc);
+               spin_unlock(&rpc->crpc_lock);
+       }
 
 abort:
         if (rpc->crpc_aborted) {
@@ -1396,7 +1396,7 @@ srpc_send_reply(struct srpc_server_rpc *rpc)
        LASSERT(buffer != NULL);
        rpyid = buffer->buf_msg.msg_body.reqst.rpyid;
 
-       cfs_spin_lock(&scd->scd_lock);
+       spin_lock(&scd->scd_lock);
 
        if (!sv->sv_shuttingdown && !srpc_serv_is_framework(sv)) {
                /* Repost buffer before replying since test client
@@ -1406,7 +1406,7 @@ srpc_send_reply(struct srpc_server_rpc *rpc)
                rpc->srpc_reqstbuf = NULL;
        }
 
-       cfs_spin_unlock(&scd->scd_lock);
+       spin_unlock(&scd->scd_lock);
 
         ev->ev_fired = 0;
         ev->ev_data  = rpc;
@@ -1441,9 +1441,9 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
         LASSERT (!cfs_in_interrupt());
 
         if (ev->status != 0) {
-                cfs_spin_lock(&srpc_data.rpc_glock);
-                srpc_data.rpc_counters.errors++;
-                cfs_spin_unlock(&srpc_data.rpc_glock);
+               spin_lock(&srpc_data.rpc_glock);
+               srpc_data.rpc_counters.errors++;
+               spin_unlock(&srpc_data.rpc_glock);
         }
 
         rpcev->ev_lnet = ev->type;
@@ -1455,9 +1455,9 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
                 LBUG ();
         case SRPC_REQUEST_SENT:
                 if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
-                        cfs_spin_lock(&srpc_data.rpc_glock);
-                        srpc_data.rpc_counters.rpcs_sent++;
-                        cfs_spin_unlock(&srpc_data.rpc_glock);
+                       spin_lock(&srpc_data.rpc_glock);
+                       srpc_data.rpc_counters.rpcs_sent++;
+                       spin_unlock(&srpc_data.rpc_glock);
                 }
         case SRPC_REPLY_RCVD:
         case SRPC_BULK_REQ_RCVD:
@@ -1474,24 +1474,24 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
                         LBUG ();
                 }
 
-                cfs_spin_lock(&crpc->crpc_lock);
+               spin_lock(&crpc->crpc_lock);
 
-                LASSERT (rpcev->ev_fired == 0);
-                rpcev->ev_fired  = 1;
-                rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
-                                                -EINTR : ev->status;
-                swi_schedule_workitem(&crpc->crpc_wi);
+               LASSERT(rpcev->ev_fired == 0);
+               rpcev->ev_fired  = 1;
+               rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
+                                               -EINTR : ev->status;
+               swi_schedule_workitem(&crpc->crpc_wi);
 
-                cfs_spin_unlock(&crpc->crpc_lock);
-                break;
+               spin_unlock(&crpc->crpc_lock);
+               break;
 
-        case SRPC_REQUEST_RCVD:
+       case SRPC_REQUEST_RCVD:
                scd = rpcev->ev_data;
                sv = scd->scd_svc;
 
                LASSERT(rpcev == &scd->scd_ev);
 
-               cfs_spin_lock(&scd->scd_lock);
+               spin_lock(&scd->scd_lock);
 
                 LASSERT (ev->unlinked);
                 LASSERT (ev->type == LNET_EVENT_PUT ||
@@ -1509,7 +1509,7 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
                if (sv->sv_shuttingdown) {
                        /* Leave buffer on scd->scd_buf_nposted since
                         * srpc_finish_service needs to traverse it. */
-                       cfs_spin_unlock(&scd->scd_lock);
+                       spin_unlock(&scd->scd_lock);
                        break;
                }
 
@@ -1564,11 +1564,11 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
                                          &scd->scd_buf_blocked);
                }
 
-               cfs_spin_unlock(&scd->scd_lock);
+               spin_unlock(&scd->scd_lock);
 
-                cfs_spin_lock(&srpc_data.rpc_glock);
-                srpc_data.rpc_counters.rpcs_rcvd++;
-                cfs_spin_unlock(&srpc_data.rpc_glock);
+               spin_lock(&srpc_data.rpc_glock);
+               srpc_data.rpc_counters.rpcs_rcvd++;
+               spin_unlock(&srpc_data.rpc_glock);
                 break;
 
         case SRPC_BULK_GET_RPLD:
@@ -1581,29 +1581,29 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
 
         case SRPC_BULK_PUT_SENT:
                 if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
-                        cfs_spin_lock(&srpc_data.rpc_glock);
+                       spin_lock(&srpc_data.rpc_glock);
 
-                        if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
-                                srpc_data.rpc_counters.bulk_get += ev->mlength;
-                        else
-                                srpc_data.rpc_counters.bulk_put += ev->mlength;
+                       if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
+                               srpc_data.rpc_counters.bulk_get += ev->mlength;
+                       else
+                               srpc_data.rpc_counters.bulk_put += ev->mlength;
 
-                        cfs_spin_unlock(&srpc_data.rpc_glock);
-                }
-        case SRPC_REPLY_SENT:
+                       spin_unlock(&srpc_data.rpc_glock);
+               }
+       case SRPC_REPLY_SENT:
                srpc = rpcev->ev_data;
                scd  = srpc->srpc_scd;
 
                LASSERT(rpcev == &srpc->srpc_ev);
 
-               cfs_spin_lock(&scd->scd_lock);
+               spin_lock(&scd->scd_lock);
 
                rpcev->ev_fired  = 1;
                rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
                                   -EINTR : ev->status;
                swi_schedule_workitem(&srpc->srpc_wi);
 
-               cfs_spin_unlock(&scd->scd_lock);
+               spin_unlock(&scd->scd_lock);
                break;
        }
 }
@@ -1638,10 +1638,10 @@ srpc_check_event (int timeout)
 int
 srpc_startup (void)
 {
-        int rc;
+       int rc;
 
-        memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
-        cfs_spin_lock_init(&srpc_data.rpc_glock);
+       memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
+       spin_lock_init(&srpc_data.rpc_glock);
 
         /* 1 second pause to avoid timestamp reuse */
         cfs_pause(cfs_time_seconds(1));
@@ -1707,7 +1707,7 @@ srpc_shutdown (void)
         default:
                 LBUG ();
         case SRPC_STATE_RUNNING:
-                cfs_spin_lock(&srpc_data.rpc_glock);
+               spin_lock(&srpc_data.rpc_glock);
 
                 for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
                         srpc_service_t *sv = srpc_data.rpc_services[i];
@@ -1717,7 +1717,7 @@ srpc_shutdown (void)
                                   i, sv->sv_name);
                 }
 
-                cfs_spin_unlock(&srpc_data.rpc_glock);
+               spin_unlock(&srpc_data.rpc_glock);
 
                 stt_shutdown();
 
index 76a6ecf..7a36231 100644 (file)
@@ -213,8 +213,8 @@ typedef struct srpc_server_rpc {
 
 /* client-side state of a RPC */
 typedef struct srpc_client_rpc {
-        cfs_list_t           crpc_list;   /* chain on user's lists */
-        cfs_spinlock_t       crpc_lock;   /* serialize */
+       cfs_list_t              crpc_list;      /* chain on user's lists */
+       spinlock_t              crpc_lock;      /* serialize */
         int                  crpc_service;
         cfs_atomic_t         crpc_refcount;
         int                  crpc_timeout; /* # seconds to wait for reply */
@@ -273,7 +273,7 @@ do {                                                                    \
 /* CPU partition data of srpc service */
 struct srpc_service_cd {
        /** serialize */
-       cfs_spinlock_t          scd_lock;
+       spinlock_t              scd_lock;
        /** backref to service */
        struct srpc_service     *scd_svc;
        /** event buffer */
@@ -382,8 +382,8 @@ typedef struct sfw_test_instance {
         int                     tsi_concur;          /* concurrency */
         int                     tsi_loop;            /* loop count */
 
-        /* status of test instance */
-        cfs_spinlock_t          tsi_lock;         /* serialize */
+       /* status of test instance */
+       spinlock_t              tsi_lock;         /* serialize */
         int                     tsi_stopping:1;   /* test is stopping */
         cfs_atomic_t            tsi_nactive;      /* # of active test unit */
         cfs_list_t              tsi_units;        /* test units */
@@ -549,7 +549,7 @@ srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer,
         CFS_INIT_LIST_HEAD(&rpc->crpc_list);
        swi_init_workitem(&rpc->crpc_wi, rpc, srpc_send_rpc,
                          lst_sched_test[lnet_cpt_of_nid(peer.nid)]);
-        cfs_spin_lock_init(&rpc->crpc_lock);
+       spin_lock_init(&rpc->crpc_lock);
         cfs_atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */
 
         rpc->crpc_dest         = peer;
@@ -613,18 +613,18 @@ int selftest_wait_events(void);
 
 #endif
 
-#define lst_wait_until(cond, lock, fmt, ...)                            \
-do {                                                                    \
-        int __I = 2;                                                    \
-        while (!(cond)) {                                               \
-                CDEBUG(IS_PO2(++__I) ? D_WARNING : D_NET,               \
-                       fmt, ## __VA_ARGS__);                            \
-                cfs_spin_unlock(&(lock));                               \
-                                                                        \
-                selftest_wait_events();                                 \
-                                                                        \
-                cfs_spin_lock(&(lock));                                 \
-        }                                                               \
+#define lst_wait_until(cond, lock, fmt, ...)                           \
+do {                                                                   \
+       int __I = 2;                                                    \
+       while (!(cond)) {                                               \
+               CDEBUG(IS_PO2(++__I) ? D_WARNING : D_NET,               \
+                      fmt, ## __VA_ARGS__);                            \
+               spin_unlock(&(lock));                                   \
+                                                                       \
+               selftest_wait_events();                                 \
+                                                                       \
+               spin_lock(&(lock));                                     \
+       }                                                               \
 } while (0)
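
lst_wait_until() expands to a polling loop that releases the named spinlock around selftest_wait_events(), so the caller must hold exactly that lock and must tolerate it being dropped. A hypothetical call (sv_lock and sv_nactive are invented for illustration):

    spin_lock(&svc->sv_lock);
    lst_wait_until(svc->sv_nactive == 0, svc->sv_lock,
                   "still %d active units\n", svc->sv_nactive);
    spin_unlock(&svc->sv_lock);

Note the macro takes the lock as an lvalue, not a pointer, since it wraps it in &(lock) itself.
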
 
 static inline void
index 2b569dc..4b8ad60 100644 (file)
  * sorted by increasing expiry time. The number of slots is 2**7 (128),
  * to cover a time period of 1024 seconds into the future before wrapping.
  */
-#define        STTIMER_MINPOLL        3   /* log2 min poll interval (8 s) */
-#define        STTIMER_SLOTTIME       (1 << STTIMER_MINPOLL)
-#define        STTIMER_SLOTTIMEMASK   (~(STTIMER_SLOTTIME - 1))
-#define        STTIMER_NSLOTS         (1 << 7)
-#define        STTIMER_SLOT(t)        (&stt_data.stt_hash[(((t) >> STTIMER_MINPOLL) & \
+#define STTIMER_MINPOLL        3   /* log2 min poll interval (8 s) */
+#define STTIMER_SLOTTIME       (1 << STTIMER_MINPOLL)
+#define STTIMER_SLOTTIMEMASK   (~(STTIMER_SLOTTIME - 1))
+#define STTIMER_NSLOTS         (1 << 7)
+#define STTIMER_SLOT(t)        (&stt_data.stt_hash[(((t) >> STTIMER_MINPOLL) & \
                                                     (STTIMER_NSLOTS - 1))])
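
With STTIMER_MINPOLL = 3, the wheel has 128 slots of 8 seconds each, giving the 1024 seconds of coverage mentioned in the comment above (128 * 8 = 1024). Checking the slot arithmetic for a couple of expiry times:

    /* t = 1000: (1000 >> 3) & 127 = 125 & 127 = 125 -> slot 125          */
    /* t = 1024: (1024 >> 3) & 127 = 128 & 127 = 0   -> wrapped to slot 0 */
    cfs_list_t *slot = STTIMER_SLOT(1000);  /* &stt_data.stt_hash[125] */
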
 
 struct st_timer_data {
-        cfs_spinlock_t   stt_lock;
+       spinlock_t       stt_lock;
         /* start time of the slot processed previously */
         cfs_time_t       stt_prev_slot;
         cfs_list_t       stt_hash[STTIMER_NSLOTS];
@@ -70,11 +70,11 @@ struct st_timer_data {
 } stt_data;
 
 void
-stt_add_timer (stt_timer_t *timer)
+stt_add_timer(stt_timer_t *timer)
 {
-        cfs_list_t *pos;
+       cfs_list_t *pos;
 
-        cfs_spin_lock(&stt_data.stt_lock);
+       spin_lock(&stt_data.stt_lock);
 
 #ifdef __KERNEL__
         LASSERT (stt_data.stt_nthreads > 0);
@@ -93,7 +93,7 @@ stt_add_timer (stt_timer_t *timer)
         }
         cfs_list_add(&timer->stt_list, pos);
 
-        cfs_spin_unlock(&stt_data.stt_lock);
+       spin_unlock(&stt_data.stt_lock);
 }
 
 /*
@@ -108,9 +108,9 @@ stt_add_timer (stt_timer_t *timer)
 int
 stt_del_timer (stt_timer_t *timer)
 {
-        int ret = 0;
+       int ret = 0;
 
-        cfs_spin_lock(&stt_data.stt_lock);
+       spin_lock(&stt_data.stt_lock);
 
 #ifdef __KERNEL__
         LASSERT (stt_data.stt_nthreads > 0);
@@ -122,8 +122,8 @@ stt_del_timer (stt_timer_t *timer)
                 cfs_list_del_init(&timer->stt_list);
         }
 
-        cfs_spin_unlock(&stt_data.stt_lock);
-        return ret;
+       spin_unlock(&stt_data.stt_lock);
+       return ret;
 }
 
 /* called with stt_data.stt_lock held */
@@ -140,15 +140,15 @@ stt_expire_list (cfs_list_t *slot, cfs_time_t now)
                         break;
 
                 cfs_list_del_init(&timer->stt_list);
-                cfs_spin_unlock(&stt_data.stt_lock);
+               spin_unlock(&stt_data.stt_lock);
 
-                expired++;
-                (*timer->stt_func) (timer->stt_data);
-                
-                cfs_spin_lock(&stt_data.stt_lock);
-        }
+               expired++;
+               (*timer->stt_func) (timer->stt_data);
+
+               spin_lock(&stt_data.stt_lock);
+       }
 
-        return expired;
+       return expired;
 }
 
 int
@@ -161,16 +161,16 @@ stt_check_timers (cfs_time_t *last)
         now = cfs_time_current_sec();
         this_slot = now & STTIMER_SLOTTIMEMASK;
 
-        cfs_spin_lock(&stt_data.stt_lock);
+       spin_lock(&stt_data.stt_lock);
 
-        while (cfs_time_aftereq(this_slot, *last)) {
-                expired += stt_expire_list(STTIMER_SLOT(this_slot), now);
-                this_slot = cfs_time_sub(this_slot, STTIMER_SLOTTIME);
-        }
+       while (cfs_time_aftereq(this_slot, *last)) {
+               expired += stt_expire_list(STTIMER_SLOT(this_slot), now);
+               this_slot = cfs_time_sub(this_slot, STTIMER_SLOTTIME);
+       }
 
-        *last = now & STTIMER_SLOTTIMEMASK;
-        cfs_spin_unlock(&stt_data.stt_lock);
-        return expired;
+       *last = now & STTIMER_SLOTTIMEMASK;
+       spin_unlock(&stt_data.stt_lock);
+       return expired;
 }
 
 #ifdef __KERNEL__
@@ -195,10 +195,10 @@ stt_timer_main (void *arg)
                                    rc);
         }
 
-        cfs_spin_lock(&stt_data.stt_lock);
-        stt_data.stt_nthreads--;
-        cfs_spin_unlock(&stt_data.stt_lock);
-        return 0;
+       spin_lock(&stt_data.stt_lock);
+       stt_data.stt_nthreads--;
+       spin_unlock(&stt_data.stt_lock);
+       return 0;
 }
 
 int
@@ -212,10 +212,10 @@ stt_start_timer_thread (void)
         if (pid < 0)
                 return (int)pid;
 
-        cfs_spin_lock(&stt_data.stt_lock);
-        stt_data.stt_nthreads++;
-        cfs_spin_unlock(&stt_data.stt_lock);
-        return 0;
+       spin_lock(&stt_data.stt_lock);
+       stt_data.stt_nthreads++;
+       spin_unlock(&stt_data.stt_lock);
+       return 0;
 }
 
 #else /* !__KERNEL__ */
@@ -243,7 +243,7 @@ stt_startup (void)
         stt_data.stt_shuttingdown = 0;
         stt_data.stt_prev_slot = cfs_time_current_sec() & STTIMER_SLOTTIMEMASK;
 
-        cfs_spin_lock_init(&stt_data.stt_lock);
+       spin_lock_init(&stt_data.stt_lock);
         for (i = 0; i < STTIMER_NSLOTS; i++)
                 CFS_INIT_LIST_HEAD(&stt_data.stt_hash[i]);
 
@@ -263,7 +263,7 @@ stt_shutdown (void)
 {
         int i;
 
-        cfs_spin_lock(&stt_data.stt_lock);
+       spin_lock(&stt_data.stt_lock);
 
         for (i = 0; i < STTIMER_NSLOTS; i++)
                 LASSERT (cfs_list_empty(&stt_data.stt_hash[i]));
@@ -277,6 +277,5 @@ stt_shutdown (void)
                        stt_data.stt_nthreads);
 #endif
 
-        cfs_spin_unlock(&stt_data.stt_lock);
-        return;
+       spin_unlock(&stt_data.stt_lock);
 }
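
Taken together, this file is a small one-shot timer API: stt_add_timer() arms a callback, and stt_del_timer() returns nonzero only when it removed the timer before it fired. A hedged usage sketch; stt_list, stt_func and stt_data are visible in the hunks above, but the expiry field name (stt_expires here) is assumed:

    static void my_expiry_cb(void *data)            /* hypothetical callback */
    {
            CDEBUG(D_NET, "request %p timed out\n", data);
    }

    stt_timer_t tmr;

    CFS_INIT_LIST_HEAD(&tmr.stt_list);
    tmr.stt_expires = cfs_time_current_sec() + 30;  /* assumed field name */
    tmr.stt_func    = my_expiry_cb;
    tmr.stt_data    = my_request;                   /* hypothetical payload */
    stt_add_timer(&tmr);

    if (!stt_del_timer(&tmr)) {
            /* callback fired (or is firing); synchronize as the srpc code
             * does, e.g. by polling a flag the callback clears */
    }
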
index 3c26ee0..b543c7a 100644 (file)
@@ -196,7 +196,7 @@ usocklnd_check_peer_stale(lnet_ni_t *ni, lnet_process_id_t id)
                 return;
         }
 
-        if (cfs_mt_atomic_read(&peer->up_refcount) == 2) {
+       if (mt_atomic_read(&peer->up_refcount) == 2) {
                 int i;
                 for (i = 0; i < N_CONN_TYPES; i++)
                         LASSERT (peer->up_conns[i] == NULL);
@@ -250,7 +250,7 @@ usocklnd_create_passive_conn(lnet_ni_t *ni,
         CFS_INIT_LIST_HEAD (&conn->uc_tx_list);
         CFS_INIT_LIST_HEAD (&conn->uc_zcack_list);
         pthread_mutex_init(&conn->uc_lock, NULL);
-        cfs_mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
+       mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
 
         *connp = conn;
         return 0;
@@ -307,7 +307,7 @@ usocklnd_create_active_conn(usock_peer_t *peer, int type,
         CFS_INIT_LIST_HEAD (&conn->uc_tx_list);
         CFS_INIT_LIST_HEAD (&conn->uc_zcack_list);
         pthread_mutex_init(&conn->uc_lock, NULL);
-        cfs_mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
+       mt_atomic_set(&conn->uc_refcount, 1); /* 1 ref for me */
 
         *connp = conn;
         return 0;
@@ -709,7 +709,7 @@ usocklnd_create_peer(lnet_ni_t *ni, lnet_process_id_t id,
         peer->up_incrn_is_set = 0;
         peer->up_errored      = 0;
         peer->up_last_alive   = 0;
-        cfs_mt_atomic_set (&peer->up_refcount, 1); /* 1 ref for caller */
+       mt_atomic_set(&peer->up_refcount, 1); /* 1 ref for caller */
         pthread_mutex_init(&peer->up_lock, NULL);
 
         pthread_mutex_lock(&net->un_lock);
index 9e0baef..b58878c 100644 (file)
@@ -200,7 +200,7 @@ usocklnd_poll_thread(void *arg)
         }
 
         /* unblock usocklnd_shutdown() */
-        cfs_mt_complete(&pt_data->upt_completion);
+       mt_complete(&pt_data->upt_completion);
 
         return 0;
 }
index cf670e2..3dc175b 100644 (file)
@@ -141,7 +141,7 @@ usocklnd_release_poll_states(int n)
                 libcfs_sock_release(pt->upt_notifier[1]);
 
                 pthread_mutex_destroy(&pt->upt_pollrequests_lock);
-                cfs_mt_fini_completion(&pt->upt_completion);
+               mt_fini_completion(&pt->upt_completion);
 
                 LIBCFS_FREE (pt->upt_pollfd,
                              sizeof(struct pollfd) * pt->upt_npollfd);
@@ -284,7 +284,7 @@ usocklnd_base_startup()
                 CFS_INIT_LIST_HEAD (&pt->upt_pollrequests);
                 CFS_INIT_LIST_HEAD (&pt->upt_stale_list);
                 pthread_mutex_init(&pt->upt_pollrequests_lock, NULL);
-                cfs_mt_init_completion(&pt->upt_completion);
+               mt_init_completion(&pt->upt_completion);
         }
 
         /* Initialize peer hash list */
@@ -333,7 +333,7 @@ usocklnd_base_shutdown(int n)
         for (i = 0; i < n; i++) {
                 usock_pollthread_t *pt = &usock_data.ud_pollthreads[i];
                 usocklnd_wakeup_pollthread(i);
-                cfs_mt_wait_for_completion(&pt->upt_completion);
+               mt_wait_for_completion(&pt->upt_completion);
         }
 
         pthread_rwlock_destroy(&usock_data.ud_peers_lock);
index 1dd4336..12a1a6d 100644 (file)
@@ -94,7 +94,7 @@ typedef struct {
         int                uc_sending;       /* send op is in progress */
         usock_tx_t        *uc_tx_hello;      /* fake tx with hello */
 
-        cfs_mt_atomic_t    uc_refcount;      /* # of users */
+       mt_atomic_t        uc_refcount;      /* # of users */
         pthread_mutex_t    uc_lock;          /* serialize */
         int                uc_errored;       /* a flag for lnet_notify() */
 } usock_conn_t;
@@ -129,7 +129,7 @@ typedef struct usock_peer_s {
         __u64             up_incarnation; /* peer's incarnation */
         int               up_incrn_is_set;/* 0 if peer's incarnation
                                                * hasn't been set so far */
-        cfs_mt_atomic_t   up_refcount;    /* # of users */
+       mt_atomic_t       up_refcount;    /* # of users */
         pthread_mutex_t   up_lock;        /* serialize */
         int               up_errored;     /* a flag for lnet_notify() */
         cfs_time_t        up_last_alive;  /* when the peer was last alive */
@@ -152,7 +152,7 @@ typedef struct {
         cfs_list_t          upt_pollrequests;   /* list of poll requests */
         pthread_mutex_t     upt_pollrequests_lock; /* serialize */
         int                 upt_errno;         /* non-zero if errored */
-        cfs_mt_completion_t upt_completion;    /* wait/signal facility for
+       mt_completion_t upt_completion;    /* wait/signal facility for
                                                 * synchronizing shutdown */
 } usock_pollthread_t;
 
@@ -225,8 +225,8 @@ typedef struct {
 static inline void
 usocklnd_conn_addref(usock_conn_t *conn)
 {
-        LASSERT (cfs_mt_atomic_read(&conn->uc_refcount) > 0);
-        cfs_mt_atomic_inc(&conn->uc_refcount);
+       LASSERT(mt_atomic_read(&conn->uc_refcount) > 0);
+       mt_atomic_inc(&conn->uc_refcount);
 }
 
 void usocklnd_destroy_conn(usock_conn_t *conn);
@@ -234,16 +234,16 @@ void usocklnd_destroy_conn(usock_conn_t *conn);
 static inline void
 usocklnd_conn_decref(usock_conn_t *conn)
 {
-        LASSERT (cfs_mt_atomic_read(&conn->uc_refcount) > 0);
-        if (cfs_mt_atomic_dec_and_test(&conn->uc_refcount))
+       LASSERT(mt_atomic_read(&conn->uc_refcount) > 0);
+       if (mt_atomic_dec_and_test(&conn->uc_refcount))
                 usocklnd_destroy_conn(conn);
 }
 
 static inline void
 usocklnd_peer_addref(usock_peer_t *peer)
 {
-        LASSERT (cfs_mt_atomic_read(&peer->up_refcount) > 0);
-        cfs_mt_atomic_inc(&peer->up_refcount);
+       LASSERT(mt_atomic_read(&peer->up_refcount) > 0);
+       mt_atomic_inc(&peer->up_refcount);
 }
 
 void usocklnd_destroy_peer(usock_peer_t *peer);
@@ -251,8 +251,8 @@ void usocklnd_destroy_peer(usock_peer_t *peer);
 static inline void
 usocklnd_peer_decref(usock_peer_t *peer)
 {
-        LASSERT (cfs_mt_atomic_read(&peer->up_refcount) > 0);
-        if (cfs_mt_atomic_dec_and_test(&peer->up_refcount))
+       LASSERT(mt_atomic_read(&peer->up_refcount) > 0);
+       if (mt_atomic_dec_and_test(&peer->up_refcount))
                 usocklnd_destroy_peer(peer);
 }
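
The four helpers above encode the standard refcount contract with the renamed mt_atomic API: a reference may only be taken by a thread that already holds one (hence LASSERT(... > 0)), and whichever thread drops the count to zero destroys the object. The same idiom, generically:

    struct obj {
            mt_atomic_t o_refs;     /* creator starts it at 1 */
    };

    static inline void obj_get(struct obj *o)
    {
            LASSERT(mt_atomic_read(&o->o_refs) > 0);    /* caller owns a ref */
            mt_atomic_inc(&o->o_refs);
    }

    static inline void obj_put(struct obj *o)
    {
            LASSERT(mt_atomic_read(&o->o_refs) > 0);
            if (mt_atomic_dec_and_test(&o->o_refs))
                    obj_destroy(o);                     /* hypothetical destructor */
    }
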
 
index e634055..6d21b2e 100644 (file)
@@ -71,7 +71,7 @@ int seq_server_set_cli(struct lu_server_seq *seq,
          * Ask client for new range, assign that range to ->seq_space and write
          * seq state to backing store should be atomic.
          */
-        cfs_mutex_lock(&seq->lss_mutex);
+       mutex_lock(&seq->lss_mutex);
 
         if (cli == NULL) {
                 CDEBUG(D_INFO, "%s: Detached sequence client %s\n",
@@ -93,7 +93,7 @@ int seq_server_set_cli(struct lu_server_seq *seq,
         cli->lcs_space.lsr_index = seq->lss_site->ms_node_id;
         EXIT;
 out_up:
-        cfs_mutex_unlock(&seq->lss_mutex);
+       mutex_unlock(&seq->lss_mutex);
         return rc;
 }
 EXPORT_SYMBOL(seq_server_set_cli);
@@ -150,9 +150,9 @@ int seq_server_alloc_super(struct lu_server_seq *seq,
         int rc;
         ENTRY;
 
-        cfs_mutex_lock(&seq->lss_mutex);
+       mutex_lock(&seq->lss_mutex);
         rc = __seq_server_alloc_super(seq, out, env);
-        cfs_mutex_unlock(&seq->lss_mutex);
+       mutex_unlock(&seq->lss_mutex);
 
         RETURN(rc);
 }
@@ -281,9 +281,9 @@ int seq_server_alloc_meta(struct lu_server_seq *seq,
         int rc;
         ENTRY;
 
-        cfs_mutex_lock(&seq->lss_mutex);
+       mutex_lock(&seq->lss_mutex);
         rc = __seq_server_alloc_meta(seq, out, env);
-        cfs_mutex_unlock(&seq->lss_mutex);
+       mutex_unlock(&seq->lss_mutex);
 
         RETURN(rc);
 }
@@ -490,7 +490,7 @@ int seq_server_init(struct lu_server_seq *seq,
         range_init(&seq->lss_hiwater_set);
         seq->lss_set_width = LUSTRE_SEQ_BATCH_WIDTH;
 
-        cfs_mutex_init(&seq->lss_mutex);
+       mutex_init(&seq->lss_mutex);
 
         seq->lss_width = is_srv ?
                 LUSTRE_SEQ_META_WIDTH : LUSTRE_SEQ_SUPER_WIDTH;
index bc0f46b..e58e7c9 100644 (file)
@@ -149,7 +149,7 @@ int seq_client_alloc_super(struct lu_client_seq *seq,
         int rc;
         ENTRY;
 
-        cfs_mutex_lock(&seq->lcs_mutex);
+       mutex_lock(&seq->lcs_mutex);
 
 #ifdef __KERNEL__
         if (seq->lcs_srv) {
@@ -163,7 +163,7 @@ int seq_client_alloc_super(struct lu_client_seq *seq,
 #ifdef __KERNEL__
         }
 #endif
-        cfs_mutex_unlock(&seq->lcs_mutex);
+       mutex_unlock(&seq->lcs_mutex);
         RETURN(rc);
 }
 
@@ -227,24 +227,24 @@ static int seq_fid_alloc_prep(struct lu_client_seq *seq,
         if (seq->lcs_update) {
                 cfs_waitq_add(&seq->lcs_waitq, link);
                 cfs_set_current_state(CFS_TASK_UNINT);
-                cfs_mutex_unlock(&seq->lcs_mutex);
+               mutex_unlock(&seq->lcs_mutex);
 
                 cfs_waitq_wait(link, CFS_TASK_UNINT);
 
-                cfs_mutex_lock(&seq->lcs_mutex);
+               mutex_lock(&seq->lcs_mutex);
                 cfs_waitq_del(&seq->lcs_waitq, link);
                 cfs_set_current_state(CFS_TASK_RUNNING);
                 return -EAGAIN;
         }
         ++seq->lcs_update;
-        cfs_mutex_unlock(&seq->lcs_mutex);
+       mutex_unlock(&seq->lcs_mutex);
         return 0;
 }
 
 static void seq_fid_alloc_fini(struct lu_client_seq *seq)
 {
         LASSERT(seq->lcs_update == 1);
-        cfs_mutex_lock(&seq->lcs_mutex);
+       mutex_lock(&seq->lcs_mutex);
         --seq->lcs_update;
         cfs_waitq_signal(&seq->lcs_waitq);
 }
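
seq_fid_alloc_prep()/seq_fid_alloc_fini() serialize sequence refills without holding lcs_mutex across the allocation RPC: prep either claims the update and returns 0 with the mutex dropped, or sleeps on lcs_waitq and returns -EAGAIN so the caller re-checks. The caller's rough shape, as seq_client_get_seq() below uses it (do_refill is a stand-in for the real allocation call):

    mutex_lock(&seq->lcs_mutex);
    cfs_waitlink_init(&link);
    while (1) {
            if (seq_fid_alloc_prep(seq, &link) == 0)
                    break;          /* we own the refill; lcs_mutex dropped */
            /* -EAGAIN: someone else refilled; loop re-checks the state */
    }
    rc = do_refill(seq);            /* hypothetical, runs without the mutex */
    seq_fid_alloc_fini(seq);        /* retakes lcs_mutex, wakes waiters */
    mutex_unlock(&seq->lcs_mutex);
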
@@ -257,7 +257,7 @@ int seq_client_get_seq(const struct lu_env *env,
         int rc;
 
         LASSERT(seqnr != NULL);
-        cfs_mutex_lock(&seq->lcs_mutex);
+       mutex_lock(&seq->lcs_mutex);
         cfs_waitlink_init(&link);
 
         while (1) {
@@ -271,7 +271,7 @@ int seq_client_get_seq(const struct lu_env *env,
                 CERROR("%s: Can't allocate new sequence, "
                        "rc %d\n", seq->lcs_name, rc);
                 seq_fid_alloc_fini(seq);
-                cfs_mutex_unlock(&seq->lcs_mutex);
+               mutex_unlock(&seq->lcs_mutex);
                 return rc;
         }
 
@@ -289,7 +289,7 @@ int seq_client_get_seq(const struct lu_env *env,
          * to setup FLD for it.
          */
         seq_fid_alloc_fini(seq);
-        cfs_mutex_unlock(&seq->lcs_mutex);
+       mutex_unlock(&seq->lcs_mutex);
 
         return rc;
 }
@@ -307,7 +307,7 @@ int seq_client_alloc_fid(const struct lu_env *env,
         LASSERT(fid != NULL);
 
         cfs_waitlink_init(&link);
-        cfs_mutex_lock(&seq->lcs_mutex);
+       mutex_lock(&seq->lcs_mutex);
 
        if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_EXHAUST))
                seq->lcs_fid.f_oid = seq->lcs_width;
@@ -332,7 +332,7 @@ int seq_client_alloc_fid(const struct lu_env *env,
                         CERROR("%s: Can't allocate new sequence, "
                                "rc %d\n", seq->lcs_name, rc);
                         seq_fid_alloc_fini(seq);
-                        cfs_mutex_unlock(&seq->lcs_mutex);
+                       mutex_unlock(&seq->lcs_mutex);
                         RETURN(rc);
                 }
 
@@ -354,7 +354,7 @@ int seq_client_alloc_fid(const struct lu_env *env,
         }
 
         *fid = seq->lcs_fid;
-        cfs_mutex_unlock(&seq->lcs_mutex);
+       mutex_unlock(&seq->lcs_mutex);
 
         CDEBUG(D_INFO, "%s: Allocated FID "DFID"\n", seq->lcs_name,  PFID(fid));
         RETURN(rc);
@@ -371,16 +371,16 @@ void seq_client_flush(struct lu_client_seq *seq)
 
         LASSERT(seq != NULL);
         cfs_waitlink_init(&link);
-        cfs_mutex_lock(&seq->lcs_mutex);
+       mutex_lock(&seq->lcs_mutex);
 
         while (seq->lcs_update) {
                 cfs_waitq_add(&seq->lcs_waitq, &link);
                 cfs_set_current_state(CFS_TASK_UNINT);
-                cfs_mutex_unlock(&seq->lcs_mutex);
+               mutex_unlock(&seq->lcs_mutex);
 
                 cfs_waitq_wait(&link, CFS_TASK_UNINT);
 
-                cfs_mutex_lock(&seq->lcs_mutex);
+               mutex_lock(&seq->lcs_mutex);
                 cfs_waitq_del(&seq->lcs_waitq, &link);
                 cfs_set_current_state(CFS_TASK_RUNNING);
         }
@@ -394,7 +394,7 @@ void seq_client_flush(struct lu_client_seq *seq)
         seq->lcs_space.lsr_index = -1;
 
         range_init(&seq->lcs_space);
-        cfs_mutex_unlock(&seq->lcs_mutex);
+       mutex_unlock(&seq->lcs_mutex);
 }
 EXPORT_SYMBOL(seq_client_flush);
 
@@ -469,7 +469,7 @@ int seq_client_init(struct lu_client_seq *seq,
         seq->lcs_exp = exp;
         seq->lcs_srv = srv;
         seq->lcs_type = type;
-        cfs_mutex_init(&seq->lcs_mutex);
+       mutex_init(&seq->lcs_mutex);
         seq->lcs_width = LUSTRE_SEQ_MAX_WIDTH;
         cfs_waitq_init(&seq->lcs_waitq);
 
index 27fd1f0..fd66e19 100644 (file)
@@ -112,7 +112,7 @@ seq_server_proc_write_space(struct file *file, const char *buffer,
 
         LASSERT(seq != NULL);
 
-        cfs_mutex_lock(&seq->lss_mutex);
+       mutex_lock(&seq->lss_mutex);
        rc = seq_proc_write_common(file, buffer, count,
                                    data, &seq->lss_space);
        if (rc == 0) {
@@ -120,7 +120,7 @@ seq_server_proc_write_space(struct file *file, const char *buffer,
                        seq->lss_name, PRANGE(&seq->lss_space));
        }
 
-        cfs_mutex_unlock(&seq->lss_mutex);
+       mutex_unlock(&seq->lss_mutex);
 
         RETURN(count);
 }
@@ -135,10 +135,10 @@ seq_server_proc_read_space(char *page, char **start, off_t off,
 
         LASSERT(seq != NULL);
 
-        cfs_mutex_lock(&seq->lss_mutex);
+       mutex_lock(&seq->lss_mutex);
        rc = seq_proc_read_common(page, start, off, count, eof,
                                   data, &seq->lss_space);
-        cfs_mutex_unlock(&seq->lss_mutex);
+       mutex_unlock(&seq->lss_mutex);
 
        RETURN(rc);
 }
@@ -181,7 +181,7 @@ seq_server_proc_write_width(struct file *file, const char *buffer,
 
         LASSERT(seq != NULL);
 
-       cfs_mutex_lock(&seq->lss_mutex);
+       mutex_lock(&seq->lss_mutex);
 
        rc = lprocfs_write_helper(buffer, count, &val);
        if (rc != 0) {
@@ -194,7 +194,7 @@ seq_server_proc_write_width(struct file *file, const char *buffer,
        CDEBUG(D_INFO, "%s: Width: "LPU64"\n",
               seq->lss_name, seq->lss_width);
 out_unlock:
-       cfs_mutex_unlock(&seq->lss_mutex);
+       mutex_unlock(&seq->lss_mutex);
 
         RETURN(count);
 }
@@ -209,9 +209,9 @@ seq_server_proc_read_width(char *page, char **start, off_t off,
 
         LASSERT(seq != NULL);
 
-        cfs_mutex_lock(&seq->lss_mutex);
+       mutex_lock(&seq->lss_mutex);
         rc = snprintf(page, count, LPU64"\n", seq->lss_width);
-        cfs_mutex_unlock(&seq->lss_mutex);
+       mutex_unlock(&seq->lss_mutex);
 
        RETURN(rc);
 }
@@ -227,7 +227,7 @@ seq_client_proc_write_space(struct file *file, const char *buffer,
 
         LASSERT(seq != NULL);
 
-        cfs_mutex_lock(&seq->lcs_mutex);
+       mutex_lock(&seq->lcs_mutex);
        rc = seq_proc_write_common(file, buffer, count,
                                    data, &seq->lcs_space);
 
@@ -236,7 +236,7 @@ seq_client_proc_write_space(struct file *file, const char *buffer,
                        seq->lcs_name, PRANGE(&seq->lcs_space));
        }
 
-        cfs_mutex_unlock(&seq->lcs_mutex);
+       mutex_unlock(&seq->lcs_mutex);
 
         RETURN(count);
 }
@@ -251,10 +251,10 @@ seq_client_proc_read_space(char *page, char **start, off_t off,
 
         LASSERT(seq != NULL);
 
-        cfs_mutex_lock(&seq->lcs_mutex);
+       mutex_lock(&seq->lcs_mutex);
        rc = seq_proc_read_common(page, start, off, count, eof,
                                   data, &seq->lcs_space);
-        cfs_mutex_unlock(&seq->lcs_mutex);
+       mutex_unlock(&seq->lcs_mutex);
 
        RETURN(rc);
 }
@@ -269,11 +269,11 @@ seq_client_proc_write_width(struct file *file, const char *buffer,
 
         LASSERT(seq != NULL);
 
-        cfs_mutex_lock(&seq->lcs_mutex);
+       mutex_lock(&seq->lcs_mutex);
 
         rc = lprocfs_write_helper(buffer, count, &val);
         if (rc) {
-                cfs_mutex_unlock(&seq->lcs_mutex);
+               mutex_unlock(&seq->lcs_mutex);
                 RETURN(rc);
         }
 
@@ -286,7 +286,7 @@ seq_client_proc_write_width(struct file *file, const char *buffer,
                 }
         }
 
-        cfs_mutex_unlock(&seq->lcs_mutex);
+       mutex_unlock(&seq->lcs_mutex);
 
         RETURN(count);
 }
@@ -301,9 +301,9 @@ seq_client_proc_read_width(char *page, char **start, off_t off,
 
         LASSERT(seq != NULL);
 
-        cfs_mutex_lock(&seq->lcs_mutex);
+       mutex_lock(&seq->lcs_mutex);
         rc = snprintf(page, count, LPU64"\n", seq->lcs_width);
-        cfs_mutex_unlock(&seq->lcs_mutex);
+       mutex_unlock(&seq->lcs_mutex);
 
        RETURN(rc);
 }
@@ -318,9 +318,9 @@ seq_client_proc_read_fid(char *page, char **start, off_t off,
 
         LASSERT(seq != NULL);
 
-        cfs_mutex_lock(&seq->lcs_mutex);
+       mutex_lock(&seq->lcs_mutex);
         rc = snprintf(page, count, DFID"\n", PFID(&seq->lcs_fid));
-        cfs_mutex_unlock(&seq->lcs_mutex);
+       mutex_unlock(&seq->lcs_mutex);
 
        RETURN(rc);
 }
index 8550f91..ccfd381 100644 (file)
@@ -83,7 +83,7 @@ struct fld_cache *fld_cache_init(const char *name,
         CFS_INIT_LIST_HEAD(&cache->fci_lru);
 
         cache->fci_cache_count = 0;
-        cfs_spin_lock_init(&cache->fci_lock);
+       spin_lock_init(&cache->fci_lock);
 
         strncpy(cache->fci_name, name,
                 sizeof(cache->fci_name));
@@ -259,14 +259,14 @@ static int fld_cache_shrink(struct fld_cache *cache)
  */
 void fld_cache_flush(struct fld_cache *cache)
 {
-        ENTRY;
+       ENTRY;
 
-        cfs_spin_lock(&cache->fci_lock);
-        cache->fci_cache_size = 0;
-        fld_cache_shrink(cache);
-        cfs_spin_unlock(&cache->fci_lock);
+       spin_lock(&cache->fci_lock);
+       cache->fci_cache_size = 0;
+       fld_cache_shrink(cache);
+       spin_unlock(&cache->fci_lock);
 
-        EXIT;
+       EXIT;
 }
 
 /**
@@ -412,7 +412,7 @@ void fld_cache_insert(struct fld_cache *cache,
          * So we don't need to search new entry before starting insertion loop.
          */
 
-        cfs_spin_lock(&cache->fci_lock);
+       spin_lock(&cache->fci_lock);
         fld_cache_shrink(cache);
 
         head = &cache->fci_entries_head;
@@ -440,22 +440,21 @@ void fld_cache_insert(struct fld_cache *cache,
         /* Add new entry to cache and lru list. */
         fld_cache_entry_add(cache, f_new, prev);
 out:
-        cfs_spin_unlock(&cache->fci_lock);
-        EXIT;
+       spin_unlock(&cache->fci_lock);
+       EXIT;
 }
 
 /**
  * lookup \a seq sequence for range in fld cache.
  */
 int fld_cache_lookup(struct fld_cache *cache,
-                     const seqno_t seq, struct lu_seq_range *range)
+                    const seqno_t seq, struct lu_seq_range *range)
 {
-        struct fld_cache_entry *flde;
-        cfs_list_t *head;
-        ENTRY;
-
+       struct fld_cache_entry *flde;
+       cfs_list_t *head;
+       ENTRY;
 
-        cfs_spin_lock(&cache->fci_lock);
+       spin_lock(&cache->fci_lock);
         head = &cache->fci_entries_head;
 
         cache->fci_stat.fst_count++;
@@ -469,10 +468,10 @@ int fld_cache_lookup(struct fld_cache *cache,
                         /* update position of this entry in lru list. */
                         cfs_list_move(&flde->fce_lru, &cache->fci_lru);
                         cache->fci_stat.fst_cache++;
-                        cfs_spin_unlock(&cache->fci_lock);
-                        RETURN(0);
-                }
-        }
-        cfs_spin_unlock(&cache->fci_lock);
-        RETURN(-ENOENT);
+                       spin_unlock(&cache->fci_lock);
+                       RETURN(0);
+               }
+       }
+       spin_unlock(&cache->fci_lock);
+       RETURN(-ENOENT);
 }
index d95193f..01bc8d8 100644 (file)
@@ -149,7 +149,7 @@ int fld_server_create(struct lu_server_fld *fld,
         ENTRY;
 
         info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
-        cfs_mutex_lock(&fld->lsf_lock);
+       mutex_lock(&fld->lsf_lock);
 
         erange = &info->fti_lrange;
         new = &info->fti_irange;
@@ -250,7 +250,7 @@ out:
         if (rc == 0)
                 fld_cache_insert(fld->lsf_cache, new);
 
-        cfs_mutex_unlock(&fld->lsf_lock);
+       mutex_unlock(&fld->lsf_lock);
 
         CDEBUG((rc != 0 ? D_ERROR : D_INFO),
                "%s: FLD create: given range : "DRANGE
@@ -535,7 +535,7 @@ int fld_server_init(struct lu_server_fld *fld, struct dt_device *dt,
         cache_threshold = cache_size *
                 FLD_SERVER_CACHE_THRESHOLD / 100;
 
-        cfs_mutex_init(&fld->lsf_lock);
+       mutex_init(&fld->lsf_lock);
         fld->lsf_cache = fld_cache_init(fld->lsf_name,
                                         cache_size, cache_threshold);
         if (IS_ERR(fld->lsf_cache)) {
index c6f87f1..61955be 100644 (file)
@@ -79,11 +79,11 @@ struct fld_cache_entry {
 };
 
 struct fld_cache {
-        /**
-         * Cache guard, protects fci_hash mostly because others immutable after
-         * init is finished.
-         */
-        cfs_spinlock_t           fci_lock;
+       /**
+        * Cache guard; protects fci_hash mostly, because the other fields are
+        * immutable after init is finished.
+        */
+       spinlock_t               fci_lock;
 
         /**
          * Cache shrink threshold */
index 2e18b08..cde98e2 100644 (file)
@@ -173,17 +173,16 @@ struct lu_fld_hash fld_hash[] = {
 };
 
 static struct lu_fld_target *
-fld_client_get_target(struct lu_client_fld *fld,
-                      seqno_t seq)
+fld_client_get_target(struct lu_client_fld *fld, seqno_t seq)
 {
-        struct lu_fld_target *target;
-        ENTRY;
+       struct lu_fld_target *target;
+       ENTRY;
 
-        LASSERT(fld->lcf_hash != NULL);
+       LASSERT(fld->lcf_hash != NULL);
 
-        cfs_spin_lock(&fld->lcf_lock);
-        target = fld->lcf_hash->fh_scan_func(fld, seq);
-        cfs_spin_unlock(&fld->lcf_lock);
+       spin_lock(&fld->lcf_lock);
+       target = fld->lcf_hash->fh_scan_func(fld, seq);
+       spin_unlock(&fld->lcf_lock);
 
         if (target != NULL) {
                 CDEBUG(D_INFO, "%s: Found target (idx "LPU64
@@ -223,10 +222,10 @@ int fld_client_add_target(struct lu_client_fld *fld,
         if (target == NULL)
                 RETURN(-ENOMEM);
 
-        cfs_spin_lock(&fld->lcf_lock);
-        cfs_list_for_each_entry(tmp, &fld->lcf_targets, ft_chain) {
-                if (tmp->ft_idx == tar->ft_idx) {
-                        cfs_spin_unlock(&fld->lcf_lock);
+       spin_lock(&fld->lcf_lock);
+       cfs_list_for_each_entry(tmp, &fld->lcf_targets, ft_chain) {
+               if (tmp->ft_idx == tar->ft_idx) {
+                       spin_unlock(&fld->lcf_lock);
                         OBD_FREE_PTR(target);
                         CERROR("Target %s exists in FLD and known as %s:#"LPU64"\n",
                                name, fld_target_name(tmp), tmp->ft_idx);
@@ -244,26 +243,25 @@ int fld_client_add_target(struct lu_client_fld *fld,
                           &fld->lcf_targets);
 
         fld->lcf_count++;
-        cfs_spin_unlock(&fld->lcf_lock);
+       spin_unlock(&fld->lcf_lock);
 
-        RETURN(0);
+       RETURN(0);
 }
 EXPORT_SYMBOL(fld_client_add_target);
 
 /* Remove export from FLD */
-int fld_client_del_target(struct lu_client_fld *fld,
-                          __u64 idx)
+int fld_client_del_target(struct lu_client_fld *fld, __u64 idx)
 {
-        struct lu_fld_target *target, *tmp;
-        ENTRY;
+       struct lu_fld_target *target, *tmp;
+       ENTRY;
 
-        cfs_spin_lock(&fld->lcf_lock);
-        cfs_list_for_each_entry_safe(target, tmp,
-                                     &fld->lcf_targets, ft_chain) {
-                if (target->ft_idx == idx) {
-                        fld->lcf_count--;
-                        cfs_list_del(&target->ft_chain);
-                        cfs_spin_unlock(&fld->lcf_lock);
+       spin_lock(&fld->lcf_lock);
+       cfs_list_for_each_entry_safe(target, tmp,
+                                    &fld->lcf_targets, ft_chain) {
+               if (target->ft_idx == idx) {
+                       fld->lcf_count--;
+                       cfs_list_del(&target->ft_chain);
+                       spin_unlock(&fld->lcf_lock);
 
                         if (target->ft_exp != NULL)
                                 class_export_put(target->ft_exp);
@@ -272,8 +270,8 @@ int fld_client_del_target(struct lu_client_fld *fld,
                         RETURN(0);
                 }
         }
-        cfs_spin_unlock(&fld->lcf_lock);
-        RETURN(-ENOENT);
+       spin_unlock(&fld->lcf_lock);
+       RETURN(-ENOENT);
 }
 EXPORT_SYMBOL(fld_client_del_target);
 
@@ -357,7 +355,7 @@ int fld_client_init(struct lu_client_fld *fld,
         }
 
         fld->lcf_count = 0;
-        cfs_spin_lock_init(&fld->lcf_lock);
+       spin_lock_init(&fld->lcf_lock);
         fld->lcf_hash = &fld_hash[hash];
         fld->lcf_flags = LUSTRE_FLD_INIT;
         CFS_INIT_LIST_HEAD(&fld->lcf_targets);
@@ -392,10 +390,10 @@ EXPORT_SYMBOL(fld_client_init);
 
 void fld_client_fini(struct lu_client_fld *fld)
 {
-        struct lu_fld_target *target, *tmp;
-        ENTRY;
+       struct lu_fld_target *target, *tmp;
+       ENTRY;
 
-        cfs_spin_lock(&fld->lcf_lock);
+       spin_lock(&fld->lcf_lock);
         cfs_list_for_each_entry_safe(target, tmp,
                                      &fld->lcf_targets, ft_chain) {
                 fld->lcf_count--;
@@ -404,7 +402,7 @@ void fld_client_fini(struct lu_client_fld *fld)
                         class_export_put(target->ft_exp);
                 OBD_FREE_PTR(target);
         }
-        cfs_spin_unlock(&fld->lcf_lock);
+       spin_unlock(&fld->lcf_lock);
 
         if (fld->lcf_cache != NULL) {
                 if (!IS_ERR(fld->lcf_cache))
index 1604609..a52b2ee 100644 (file)
@@ -70,9 +70,9 @@ fld_proc_read_targets(char *page, char **start, off_t off,
        int total = 0, rc;
        ENTRY;
 
-        LASSERT(fld != NULL);
+       LASSERT(fld != NULL);
 
-        cfs_spin_lock(&fld->lcf_lock);
+       spin_lock(&fld->lcf_lock);
         cfs_list_for_each_entry(target,
                                 &fld->lcf_targets, ft_chain)
         {
@@ -84,7 +84,7 @@ fld_proc_read_targets(char *page, char **start, off_t off,
                 if (count == 0)
                         break;
         }
-        cfs_spin_unlock(&fld->lcf_lock);
+       spin_unlock(&fld->lcf_lock);
        RETURN(total);
 }
 
@@ -96,12 +96,11 @@ fld_proc_read_hash(char *page, char **start, off_t off,
        int rc;
        ENTRY;
 
-        LASSERT(fld != NULL);
+       LASSERT(fld != NULL);
 
-        cfs_spin_lock(&fld->lcf_lock);
-        rc = snprintf(page, count, "%s\n",
-                      fld->lcf_hash->fh_name);
-        cfs_spin_unlock(&fld->lcf_lock);
+       spin_lock(&fld->lcf_lock);
+       rc = snprintf(page, count, "%s\n", fld->lcf_hash->fh_name);
+       spin_unlock(&fld->lcf_lock);
 
        RETURN(rc);
 }
@@ -127,16 +126,16 @@ fld_proc_write_hash(struct file *file, const char *buffer,
                 }
         }
 
-        if (hash != NULL) {
-                cfs_spin_lock(&fld->lcf_lock);
-                fld->lcf_hash = hash;
-                cfs_spin_unlock(&fld->lcf_lock);
+       if (hash != NULL) {
+               spin_lock(&fld->lcf_lock);
+               fld->lcf_hash = hash;
+               spin_unlock(&fld->lcf_lock);
 
-                CDEBUG(D_INFO, "%s: Changed hash to \"%s\"\n",
-                       fld->lcf_name, hash->fh_name);
-        }
+               CDEBUG(D_INFO, "%s: Changed hash to \"%s\"\n",
+                      fld->lcf_name, hash->fh_name);
+       }
 
-        RETURN(count);
+       RETURN(count);
 }
 
 static int
index 64370c2..9ba73c5 100644 (file)
@@ -388,9 +388,9 @@ struct cl_object_header {
          */
         /** @{ */
         /** Lock protecting page tree. */
-        cfs_spinlock_t           coh_page_guard;
-        /** Lock protecting lock list. */
-        cfs_spinlock_t           coh_lock_guard;
+       spinlock_t               coh_page_guard;
+       /** Lock protecting lock list. */
+       spinlock_t               coh_lock_guard;
         /** @} locks */
         /** Radix tree of cl_page's, cached for this object. */
         struct radix_tree_root   coh_tree;
@@ -414,12 +414,12 @@ struct cl_object_header {
          *
          * \todo XXX this can be read/write lock if needed.
          */
-        cfs_spinlock_t           coh_attr_guard;
-        /**
-         * Number of objects above this one: 0 for a top-object, 1 for its
-         * sub-object, etc.
-         */
-        unsigned                 coh_nesting;
+       spinlock_t               coh_attr_guard;
+       /**
+        * Number of objects above this one: 0 for a top-object, 1 for its
+        * sub-object, etc.
+        */
+       unsigned                 coh_nesting;
 };
 
 /**
@@ -719,13 +719,11 @@ struct cl_page {
          */
         const enum cl_page_state cp_state;
        /** Protect to get and put page, see cl_page_put and cl_vmpage_page */
-       cfs_spinlock_t           cp_lock;
-        /**
-         * Linkage of pages within some group. Protected by
-         * cl_page::cp_mutex. */
-        cfs_list_t               cp_batch;
-        /** Mutex serializing membership of a page in a batch. */
-        cfs_mutex_t              cp_mutex;
+       spinlock_t              cp_lock;
+       /** Linkage of pages within group. Protected by cl_page::cp_mutex. */
+       cfs_list_t              cp_batch;
+       /** Mutex serializing membership of a page in a batch. */
+       struct mutex            cp_mutex;
         /** Linkage of pages within cl_req. */
         cfs_list_t               cp_flight;
         /** Transfer error. */
@@ -1552,7 +1550,7 @@ struct cl_lock {
          *
          * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
          */
-        cfs_mutex_t           cll_guard;
+       struct mutex            cll_guard;
         cfs_task_t           *cll_guarder;
         int                   cll_depth;
 
index b1fbda0..e76defa 100644 (file)
@@ -682,7 +682,7 @@ struct local_oid_storage {
        struct dt_object *los_obj;
 
        /* data used to generate new fids */
-       cfs_mutex_t       los_id_lock;
+       struct mutex     los_id_lock;
        __u64             los_seq;
        __u32             los_last_oid;
 };
index 2ca110b..96fa5fb 100644 (file)
@@ -441,7 +441,7 @@ void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
 struct cl_client_cache {
        cfs_atomic_t    ccc_users;    /* # of users (OSCs) of this data */
        cfs_list_t      ccc_lru;      /* LRU list of cached clean pages */
-       cfs_spinlock_t  ccc_lru_lock; /* lock for list */
+       spinlock_t      ccc_lru_lock; /* lock for list */
        cfs_atomic_t    ccc_lru_left; /* # of LRU entries available */
        unsigned long   ccc_lru_max;  /* Max # of LRU entries possible */
        unsigned int    ccc_lru_shrinkers; /* # of threads reclaiming */
index 17260ae..d974fad 100644 (file)
@@ -90,18 +90,18 @@ void *inter_module_get(char *arg);
 static __inline__ int ext2_set_bit(int nr, void *addr)
 {
 #ifdef __BIG_ENDIAN
-        return cfs_set_bit((nr ^ ((BITS_PER_LONG-1) & ~0x7)), addr);
+       return set_bit((nr ^ ((BITS_PER_LONG-1) & ~0x7)), addr);
 #else
-        return cfs_set_bit(nr, addr);
+       return set_bit(nr, addr);
 #endif
 }
 
-static __inline__ int ext2_clear_bit(int nr, void *addr)
+static inline int ext2_clear_bit(int nr, void *addr)
 {
 #ifdef __BIG_ENDIAN
-        return cfs_clear_bit((nr ^ ((BITS_PER_LONG-1) & ~0x7)), addr);
+       return clear_bit((nr ^ ((BITS_PER_LONG-1) & ~0x7)), addr);
 #else
-        return cfs_clear_bit(nr, addr);
+       return clear_bit(nr, addr);
 #endif
 }
 
@@ -111,7 +111,7 @@ static __inline__ int ext2_test_bit(int nr, void *addr)
         __const__ unsigned char *tmp = (__const__ unsigned char *) addr;
         return (tmp[nr >> 3] >> (nr & 7)) & 1;
 #else
-        return cfs_test_bit(nr, addr);
+       return test_bit(nr, addr);
 #endif
 }
 
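The XOR in the big-endian branches above translates a little-endian (ext2-style) bit number into the native bit numbering of a big-endian long: with BITS_PER_LONG == 64 the mask `(BITS_PER_LONG-1) & ~0x7` is 56, so the byte index within the long is complemented while the bit position inside the byte is preserved. A userspace sketch of the remap (values purely illustrative):

        #include <stdio.h>

        #define BITS_PER_LONG 64

        int main(void)
        {
                int nr;

                /* byte 0 of the LE view lands in byte 7 of the BE long, etc. */
                for (nr = 0; nr < 16; nr++)
                        printf("le bit %2d -> native bit %2d\n",
                               nr, nr ^ ((BITS_PER_LONG - 1) & ~0x7));
                return 0;
        }
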
index f872a1e..8b2273d 100644
 #include <linux/lustre_patchless_compat.h>
 
 #ifdef HAVE_FS_STRUCT_RWLOCK
-# define LOCK_FS_STRUCT(fs)   cfs_write_lock(&(fs)->lock)
-# define UNLOCK_FS_STRUCT(fs) cfs_write_unlock(&(fs)->lock)
+# define LOCK_FS_STRUCT(fs)    write_lock(&(fs)->lock)
+# define UNLOCK_FS_STRUCT(fs)  write_unlock(&(fs)->lock)
 #else
-# define LOCK_FS_STRUCT(fs)   cfs_spin_lock(&(fs)->lock)
-# define UNLOCK_FS_STRUCT(fs) cfs_spin_unlock(&(fs)->lock)
+# define LOCK_FS_STRUCT(fs)    spin_lock(&(fs)->lock)
+# define UNLOCK_FS_STRUCT(fs)  spin_unlock(&(fs)->lock)
 #endif
 
 #ifdef HAVE_FS_STRUCT_USE_PATH
@@ -218,11 +218,11 @@ static inline unsigned int mnt_get_count(struct vfsmount *mnt)
 #endif
 
 #ifdef HAVE_RW_TREE_LOCK
-#define TREE_READ_LOCK_IRQ(mapping)     read_lock_irq(&(mapping)->tree_lock)
-#define TREE_READ_UNLOCK_IRQ(mapping) read_unlock_irq(&(mapping)->tree_lock)
+#define TREE_READ_LOCK_IRQ(mapping)    read_lock_irq(&(mapping)->tree_lock)
+#define TREE_READ_UNLOCK_IRQ(mapping)  read_unlock_irq(&(mapping)->tree_lock)
 #else
-#define TREE_READ_LOCK_IRQ(mapping) cfs_spin_lock_irq(&(mapping)->tree_lock)
-#define TREE_READ_UNLOCK_IRQ(mapping) cfs_spin_unlock_irq(&(mapping)->tree_lock)
+#define TREE_READ_LOCK_IRQ(mapping)    spin_lock_irq(&(mapping)->tree_lock)
+#define TREE_READ_UNLOCK_IRQ(mapping)  spin_unlock_irq(&(mapping)->tree_lock)
 #endif
 
 #ifdef HAVE_UNREGISTER_BLKDEV_RETURN_INT
index d665c1f..94500c6 100644
@@ -108,7 +108,7 @@ struct fsfilt_operations {
         int     (* fs_map_inode_pages)(struct inode *inode, struct page **page,
                                        int pages, unsigned long *blocks,
                                        int *created, int create,
-                                       cfs_mutex_t *sem);
+                                      struct mutex *sem);
         int     (* fs_write_record)(struct file *, void *, int size, loff_t *,
                                     int force_sync);
         int     (* fs_read_record)(struct file *, void *, int size, loff_t *);
@@ -406,7 +406,7 @@ static inline int fsfilt_map_inode_pages(struct obd_device *obd,
                                          struct inode *inode,
                                          struct page **page, int pages,
                                          unsigned long *blocks, int *created,
-                                         int create, cfs_mutex_t *mutex)
+                                        int create, struct mutex *mutex)
 {
         return obd->obd_fsops->fs_map_inode_pages(inode, page, pages, blocks,
                                                   created, create, mutex);
index 061c5da..3e8ff64 100644
 /* XXX copy & paste from 2.6.15 kernel */
 static inline void ll_remove_from_page_cache(struct page *page)
 {
-        struct address_space *mapping = page->mapping;
+       struct address_space *mapping = page->mapping;
 
-        BUG_ON(!PageLocked(page));
+       BUG_ON(!PageLocked(page));
 
 #ifdef HAVE_RW_TREE_LOCK
-        write_lock_irq(&mapping->tree_lock);
+       write_lock_irq(&mapping->tree_lock);
 #else
        spin_lock_irq(&mapping->tree_lock);
 #endif
-        radix_tree_delete(&mapping->page_tree, page->index);
-        page->mapping = NULL;
-        mapping->nrpages--;
-        __dec_zone_page_state(page, NR_FILE_PAGES);
+       radix_tree_delete(&mapping->page_tree, page->index);
+       page->mapping = NULL;
+       mapping->nrpages--;
+       __dec_zone_page_state(page, NR_FILE_PAGES);
 
 #ifdef HAVE_RW_TREE_LOCK
-        write_unlock_irq(&mapping->tree_lock);
+       write_unlock_irq(&mapping->tree_lock);
 #else
        spin_unlock_irq(&mapping->tree_lock);
 #endif
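
Both branches above disable interrupts around the radix-tree update; the _irq variants are usable here presumably because the caller runs in process context with interrupts known to be enabled. When the prior IRQ state is unknown, the _irqsave pair seen elsewhere in this patch is required instead. A sketch of the two forms, with an illustrative lock:

        #include <linux/spinlock.h>

        static DEFINE_SPINLOCK(demo_lock);

        static void demo_process_context(void)
        {
                spin_lock_irq(&demo_lock);      /* IRQs unconditionally re-enabled on unlock */
                /* ... */
                spin_unlock_irq(&demo_lock);
        }

        static void demo_unknown_context(void)
        {
                unsigned long flags;

                spin_lock_irqsave(&demo_lock, flags);   /* remembers prior IRQ state */
                /* ... */
                spin_unlock_irqrestore(&demo_lock, flags);
        }
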
index 0566456..805e081 100644
@@ -59,15 +59,14 @@ struct ll_iattr {
 
 #define CLIENT_OBD_LIST_LOCK_DEBUG 1
 typedef struct {
-        cfs_spinlock_t          lock;
+       spinlock_t              lock;
 
 #ifdef CLIENT_OBD_LIST_LOCK_DEBUG
-        unsigned long       time;
-        struct task_struct *task;
-        const char         *func;
-        int                 line;
+       unsigned long       time;
+       struct task_struct *task;
+       const char         *func;
+       int                 line;
 #endif
-
 } client_obd_lock_t;
 
 #ifdef CLIENT_OBD_LIST_LOCK_DEBUG
@@ -75,9 +74,9 @@ static inline void __client_obd_list_lock(client_obd_lock_t *lock,
                                           const char *func,
                                           int line)
 {
-        unsigned long cur = jiffies;
-        while (1) {
-                if (cfs_spin_trylock(&lock->lock)) {
+       unsigned long cur = jiffies;
+       while (1) {
+               if (spin_trylock(&lock->lock)) {
                         LASSERT(lock->task == NULL);
                         lock->task = current;
                         lock->func = func;
@@ -110,28 +109,28 @@ static inline void __client_obd_list_lock(client_obd_lock_t *lock,
 
 static inline void client_obd_list_unlock(client_obd_lock_t *lock)
 {
-        LASSERT(lock->task != NULL);
-        lock->task = NULL;
-        lock->time = jiffies;
-        cfs_spin_unlock(&lock->lock);
+       LASSERT(lock->task != NULL);
+       lock->task = NULL;
+       lock->time = jiffies;
+       spin_unlock(&lock->lock);
 }
 
 #else /* ifdef CLIENT_OBD_LIST_LOCK_DEBUG */
 static inline void client_obd_list_lock(client_obd_lock_t *lock)
 {
-       cfs_spin_lock(&lock->lock);
+       spin_lock(&lock->lock);
 }
 
 static inline void client_obd_list_unlock(client_obd_lock_t *lock)
 {
-        cfs_spin_unlock(&lock->lock);
+       spin_unlock(&lock->lock);
 }
 
 #endif /* ifdef CLIENT_OBD_LIST_LOCK_DEBUG */
 
 static inline void client_obd_list_lock_init(client_obd_lock_t *lock)
 {
-        cfs_spin_lock_init(&lock->lock);
+       spin_lock_init(&lock->lock);
 }
 
 static inline void client_obd_list_lock_done(client_obd_lock_t *lock)
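
The debug variant above spins on spin_trylock() so it can record the owning task once the lock is won; the warning logic the hunk elides would sit in the retry loop. A minimal sketch of that record-the-owner pattern, under illustrative names:

        #include <linux/spinlock.h>
        #include <linux/sched.h>

        struct tracked_lock {
                spinlock_t               tl_lock;
                struct task_struct      *tl_task;       /* holder, for debugging only */
        };

        static void tracked_lock_acquire(struct tracked_lock *t)
        {
                while (!spin_trylock(&t->tl_lock))
                        cpu_relax();    /* real code could warn after too many spins */
                t->tl_task = current;   /* safe: we now hold tl_lock */
        }

        static void tracked_lock_release(struct tracked_lock *t)
        {
                t->tl_task = NULL;
                spin_unlock(&t->tl_lock);
        }
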
index 1e43ab2..08658d8 100644
@@ -74,8 +74,8 @@ struct lprocfs_static_vars {
 /* if we find more consumers this could be generalized */
 #define OBD_HIST_MAX 32
 struct obd_histogram {
-        cfs_spinlock_t oh_lock;
-        unsigned long  oh_buckets[OBD_HIST_MAX];
+       spinlock_t      oh_lock;
+       unsigned long   oh_buckets[OBD_HIST_MAX];
 };
 
 enum {
@@ -192,16 +192,16 @@ enum lprocfs_fields_flags {
 };
 
 struct lprocfs_stats {
-       unsigned short         ls_num;   /* # of counters */
-       unsigned short         ls_biggest_alloc_num;
-                                        /* 1 + the highest slot index which has
-                                         * been allocated, the 0th entry is
-                                         * a statically intialized template */
-       int                    ls_flags; /* See LPROCFS_STATS_FLAG_* */
+       unsigned short          ls_num; /* # of counters */
+       unsigned short          ls_biggest_alloc_num;
+                                       /* 1 + the highest slot index which has
+                                        * been allocated; the 0th entry is
+                                        * a statically initialized template */
+       int                     ls_flags; /* See LPROCFS_STATS_FLAG_* */
        /* Lock used when there are no percpu stats areas; For percpu stats,
         * it is used to protect ls_biggest_alloc_num change */
-       cfs_spinlock_t         ls_lock;
-       struct lprocfs_percpu *ls_percpu[0];
+       spinlock_t              ls_lock;
+       struct lprocfs_percpu   *ls_percpu[0];
 };
 
 #define OPC_RANGE(seg) (seg ## _LAST_OPC - seg ## _FIRST_OPC)
@@ -365,7 +365,7 @@ typedef void (*cntr_init_callback)(struct lprocfs_stats *stats);
 struct obd_job_stats {
        cfs_hash_t        *ojs_hash;
        cfs_list_t         ojs_list;
-       cfs_rwlock_t       ojs_lock; /* protect the obj_list */
+       rwlock_t           ojs_lock; /* protect the ojs_list */
        cntr_init_callback ojs_cntr_init_fn;
        int                ojs_cntr_num;
        int                ojs_cleanup_interval;
@@ -402,9 +402,9 @@ static inline int lprocfs_stats_lock(struct lprocfs_stats *stats, int opc,
 
                /* non-percpu counter stats */
                if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
-                       cfs_spin_lock_irqsave(&stats->ls_lock, *flags);
+                       spin_lock_irqsave(&stats->ls_lock, *flags);
                else
-                       cfs_spin_lock(&stats->ls_lock);
+                       spin_lock(&stats->ls_lock);
                return 0;
 
        case LPROCFS_GET_NUM_CPU:
@@ -414,9 +414,9 @@ static inline int lprocfs_stats_lock(struct lprocfs_stats *stats, int opc,
 
                /* non-percpu counter stats */
                if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
-                       cfs_spin_lock_irqsave(&stats->ls_lock, *flags);
+                       spin_lock_irqsave(&stats->ls_lock, *flags);
                else
-                       cfs_spin_lock(&stats->ls_lock);
+                       spin_lock(&stats->ls_lock);
                return 1;
        }
 }
@@ -431,10 +431,10 @@ static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats, int opc,
        case LPROCFS_GET_SMP_ID:
                if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
                        if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
-                               cfs_spin_unlock_irqrestore(&stats->ls_lock,
+                               spin_unlock_irqrestore(&stats->ls_lock,
                                                           *flags);
                        } else {
-                               cfs_spin_unlock(&stats->ls_lock);
+                               spin_unlock(&stats->ls_lock);
                        }
                } else {
                        cfs_put_cpu();
@@ -444,10 +444,10 @@ static inline void lprocfs_stats_unlock(struct lprocfs_stats *stats, int opc,
        case LPROCFS_GET_NUM_CPU:
                if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
                        if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
-                               cfs_spin_unlock_irqrestore(&stats->ls_lock,
+                               spin_unlock_irqrestore(&stats->ls_lock,
                                                           *flags);
                        } else {
-                               cfs_spin_unlock(&stats->ls_lock);
+                               spin_unlock(&stats->ls_lock);
                        }
                }
                return;
@@ -690,14 +690,14 @@ extern int lprocfs_seq_release(cfs_inode_t *, struct file *);
  * the import in a client obd_device for a lprocfs entry */
 #define LPROCFS_CLIMP_CHECK(obd) do {           \
         typecheck(struct obd_device *, obd);    \
-        cfs_down_read(&(obd)->u.cli.cl_sem);    \
+       down_read(&(obd)->u.cli.cl_sem);    \
         if ((obd)->u.cli.cl_import == NULL) {   \
-             cfs_up_read(&(obd)->u.cli.cl_sem); \
+            up_read(&(obd)->u.cli.cl_sem); \
              return -ENODEV;                    \
         }                                       \
 } while(0)
 #define LPROCFS_CLIMP_EXIT(obd)                 \
-        cfs_up_read(&(obd)->u.cli.cl_sem);
+       up_read(&(obd)->u.cli.cl_sem);
 
 
 /* write the name##_seq_show function, call LPROC_SEQ_FOPS_RO for read-only
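
LPROCFS_CLIMP_CHECK()/LPROCFS_CLIMP_EXIT() above form a read-side rw_semaphore guard: take cl_sem shared, fail if the import has already been torn down, and otherwise leave the semaphore held for the caller. The same shape in isolation, with illustrative names:

        #include <linux/rwsem.h>
        #include <linux/errno.h>

        struct demo_client {
                struct rw_semaphore      dc_sem;
                void                    *dc_import;     /* NULL once disconnected */
        };

        static int demo_climp_check(struct demo_client *cli)
        {
                down_read(&cli->dc_sem);
                if (cli->dc_import == NULL) {
                        up_read(&cli->dc_sem);
                        return -ENODEV;
                }
                return 0;       /* caller pairs this with up_read() */
        }
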
index 175de05..953c34c 100644
@@ -641,17 +641,17 @@ struct lu_site {
          * by ls_ld_lock.
          **/
         cfs_list_t                ls_ld_linkage;
-        cfs_spinlock_t            ls_ld_lock;
+       spinlock_t              ls_ld_lock;
 
-        /**
-         * lu_site stats
-         */
-        struct lprocfs_stats     *ls_stats;
-        struct lprocfs_stats     *ls_time_stats;
+       /**
+        * lu_site stats
+        */
+       struct lprocfs_stats    *ls_stats;
+       struct lprocfs_stats    *ls_time_stats;
        /**
         * XXX: a hack! fld has to find md_site via site, remove when possible
         */
-       struct md_site           *ld_md_site;
+       struct md_site          *ld_md_site;
 };
 
 static inline struct lu_site_bkt_data *
@@ -719,7 +719,7 @@ static inline void lu_object_get(struct lu_object *o)
  */
 static inline int lu_object_is_dying(const struct lu_object_header *h)
 {
-        return cfs_test_bit(LU_OBJECT_HEARD_BANSHEE, &h->loh_flags);
+       return test_bit(LU_OBJECT_HEARD_BANSHEE, &h->loh_flags);
 }
 
 void lu_object_put(const struct lu_env *env, struct lu_object *o);
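
test_bit() and its set/clear counterparts act atomically on a bit *number* within an unsigned long flags word, which is why lu_object_is_dying() needs no lock of its own. A sketch with illustrative names:

        #include <linux/bitops.h>

        #define DEMO_DYING      0       /* a bit number, not a mask */

        static unsigned long demo_flags;

        static int demo_is_dying(void)
        {
                return test_bit(DEMO_DYING, &demo_flags);
        }

        static void demo_mark_dying(void)
        {
                set_bit(DEMO_DYING, &demo_flags);       /* atomic read-modify-write */
        }
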
index 5bb3bf1..4c33d24 100644
@@ -118,10 +118,10 @@ struct lu_ref_link;
  * etc.) refer to.
  */
 struct lu_ref {
-        /**
-         * Spin-lock protecting lu_ref::lf_list.
-         */
-        cfs_spinlock_t       lf_guard;
+       /**
+        * Spin-lock protecting lu_ref::lf_list.
+        */
+       spinlock_t              lf_guard;
         /**
          * List of all outstanding references (each represented by struct
          * lu_ref_link), pointing to this object.
index bc5b327..f7d55f1 100644
@@ -52,11 +52,11 @@ struct lu_target {
         /** Server last transaction number */
         __u64                    lut_last_transno;
         /** Lock protecting last transaction number */
-        cfs_spinlock_t           lut_translock;
-        /** Lock protecting client bitmap */
-        cfs_spinlock_t           lut_client_bitmap_lock;
-        /** Bitmap of known clients */
-        unsigned long           *lut_client_bitmap;
+       spinlock_t               lut_translock;
+       /** Lock protecting client bitmap */
+       spinlock_t               lut_client_bitmap_lock;
+       /** Bitmap of known clients */
+       unsigned long           *lut_client_bitmap;
 };
 
 typedef void (*tgt_cb_t)(struct lu_target *lut, __u64 transno,
index 6e0873a..212fd5c 100644
@@ -83,13 +83,13 @@ struct obd_capa {
         struct lustre_capa        c_capa;       /* capa */
         cfs_atomic_t              c_refc;       /* ref count */
         cfs_time_t                c_expiry;     /* jiffies */
-        cfs_spinlock_t            c_lock;       /* protect capa content */
-        int                       c_site;
+       spinlock_t              c_lock; /* protect capa content */
+       int                     c_site;
 
-        union {
-                struct client_capa      cli;
-                struct target_capa      tgt;
-        } u;
+       union {
+               struct client_capa      cli;
+               struct target_capa      tgt;
+       } u;
 };
 
 enum {
@@ -175,7 +175,7 @@ typedef int (* renew_capa_cb_t)(struct obd_capa *, struct lustre_capa *);
 
 /* obdclass/capa.c */
 extern cfs_list_t capa_list[];
-extern cfs_spinlock_t capa_lock;
+extern spinlock_t capa_lock;
 extern int capa_count[];
 extern cfs_mem_cache_t *capa_cachep;
 
@@ -205,7 +205,7 @@ static inline struct obd_capa *alloc_capa(int site)
 
         CFS_INIT_LIST_HEAD(&ocapa->c_list);
         cfs_atomic_set(&ocapa->c_refc, 1);
-        cfs_spin_lock_init(&ocapa->c_lock);
+       spin_lock_init(&ocapa->c_lock);
         ocapa->c_site = site;
         if (ocapa->c_site == CAPA_SITE_CLIENT)
                 CFS_INIT_LIST_HEAD(&ocapa->u.cli.lli_list);
index c692f3d..55a2665 100644
@@ -339,7 +339,7 @@ struct ldlm_pool {
         /**
          * Lock for protecting slv/clv updates.
          */
-        cfs_spinlock_t         pl_lock;
+       spinlock_t              pl_lock;
         /**
           * Number of allowed locks in pool, both client and server side.
          */
@@ -471,7 +471,7 @@ struct ldlm_namespace {
         /**
          * serialize
          */
-        cfs_spinlock_t         ns_lock;
+       spinlock_t              ns_lock;
 
         /**
          * big refcount (by bucket)
@@ -676,7 +676,7 @@ struct ldlm_lock {
           * Internal spinlock protects l_resource. We should hold this lock
           * before grabbing res_lock.
          */
-        cfs_spinlock_t           l_lock;
+       spinlock_t              l_lock;
         /**
          * ldlm_lock_change_resource() can change this.
          */
@@ -869,11 +869,11 @@ struct ldlm_lock {
 };
 
 struct ldlm_resource {
-        struct ldlm_ns_bucket *lr_ns_bucket;
+       struct ldlm_ns_bucket   *lr_ns_bucket;
 
-        /* protected by ns_hash_lock */
-        cfs_hlist_node_t       lr_hash;
-        cfs_spinlock_t         lr_lock;
+       /* protected by ns_hash_lock */
+       cfs_hlist_node_t        lr_hash;
+       spinlock_t              lr_lock;
 
         /* protected by lr_lock */
         cfs_list_t             lr_granted;
@@ -888,7 +888,7 @@ struct ldlm_resource {
 
         /* Server-side-only lock value block elements */
         /** to serialize lvbo_init */
-        cfs_mutex_t            lr_lvb_mutex;
+       struct mutex            lr_lvb_mutex;
         __u32                  lr_lvb_len;
         /** protect by lr_lock */
         void                  *lr_lvb_data;
@@ -1343,18 +1343,18 @@ enum lock_res_type {
 
 static inline void lock_res(struct ldlm_resource *res)
 {
-        cfs_spin_lock(&res->lr_lock);
+       spin_lock(&res->lr_lock);
 }
 
 static inline void lock_res_nested(struct ldlm_resource *res,
                                    enum lock_res_type mode)
 {
-        cfs_spin_lock_nested(&res->lr_lock, mode);
+       spin_lock_nested(&res->lr_lock, mode);
 }
 
 static inline void unlock_res(struct ldlm_resource *res)
 {
-        cfs_spin_unlock(&res->lr_lock);
+       spin_unlock(&res->lr_lock);
 }
 
 static inline void check_res_locked(struct ldlm_resource *res)
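
lock_res_nested() above passes a lockdep subclass: when two ldlm resources must be held at once, spin_lock_nested() tells lockdep the second acquisition is a deliberate ordering rather than a self-deadlock. A sketch of the idea; the enum mirrors lock_res_type in spirit only:

        #include <linux/spinlock.h>

        enum demo_res_level { DEMO_RES_PARENT, DEMO_RES_CHILD };

        struct demo_res {
                spinlock_t      dr_lock;
        };

        static void demo_lock_pair(struct demo_res *parent, struct demo_res *child)
        {
                spin_lock(&parent->dr_lock);
                spin_lock_nested(&child->dr_lock, DEMO_RES_CHILD);
                /* ... operate on both resources ... */
                spin_unlock(&child->dr_lock);
                spin_unlock(&parent->dr_lock);
        }
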
index ef0d64e..3c08e6d 100644
@@ -59,28 +59,28 @@ struct mdt_idmap_table;
  * Target-specific export data
  */
 struct tg_export_data {
-        /** Protects led_lcd below */
-        cfs_mutex_t             ted_lcd_lock;
-        /** Per-client data for each export */
-        struct lsd_client_data *ted_lcd;
-        /** Offset of record in last_rcvd file */
-        loff_t                  ted_lr_off;
-        /** Client index in last_rcvd file */
-        int                     ted_lr_idx;
+       /** Protects led_lcd below */
+       struct mutex            ted_lcd_lock;
+       /** Per-client data for each export */
+       struct lsd_client_data  *ted_lcd;
+       /** Offset of record in last_rcvd file */
+       loff_t                  ted_lr_off;
+       /** Client index in last_rcvd file */
+       int                     ted_lr_idx;
 };
 
 /**
  * MDT-specific export data
  */
 struct mdt_export_data {
-        struct tg_export_data   med_ted;
-        /** List of all files opened by client on this MDT */
-        cfs_list_t              med_open_head;
-        cfs_spinlock_t          med_open_lock; /* lock med_open_head, mfd_list*/
-        /** Bitmask of all ibit locks this MDT understands */
-        __u64                   med_ibits_known;
-        cfs_mutex_t             med_idmap_mutex;
-        struct lustre_idmap_table *med_idmap;
+       struct tg_export_data   med_ted;
+       /** List of all files opened by client on this MDT */
+       cfs_list_t              med_open_head;
+       spinlock_t              med_open_lock; /* med_open_head, mfd_list */
+       /** Bitmask of all ibit locks this MDT understands */
+       __u64                   med_ibits_known;
+       struct mutex            med_idmap_mutex;
+       struct lustre_idmap_table *med_idmap;
 };
 
 struct ec_export_data { /* echo client */
@@ -90,8 +90,8 @@ struct ec_export_data { /* echo client */
 /* In-memory access to client data from OST struct */
 /** Filter (oss-side) specific import data */
 struct filter_export_data {
-        struct tg_export_data      fed_ted;
-        cfs_spinlock_t             fed_lock;     /**< protects fed_mod_list */
+       struct tg_export_data   fed_ted;
+       spinlock_t              fed_lock;       /**< protects fed_mod_list */
         long                       fed_dirty;    /* in bytes */
         long                       fed_grant;    /* in bytes */
         cfs_list_t                 fed_mod_list; /* files being modified */
@@ -102,8 +102,8 @@ struct filter_export_data {
 };
 
 struct mgs_export_data {
-        cfs_list_t                 med_clients; /* mgc fs client via this exp */
-        cfs_spinlock_t             med_lock;    /* protect med_clients */
+       cfs_list_t              med_clients;    /* mgc fs client via this exp */
+       spinlock_t              med_lock;       /* protect med_clients */
 };
 
 /**
@@ -169,7 +169,7 @@ struct obd_export {
         cfs_atomic_t              exp_locks_count; /** Lock references */
 #if LUSTRE_TRACKS_LOCK_EXP_REFS
         cfs_list_t                exp_locks_list;
-        cfs_spinlock_t            exp_locks_list_guard;
+       spinlock_t                exp_locks_list_guard;
 #endif
         /** UUID of client connected to this export */
         struct obd_uuid           exp_client_uuid;
@@ -206,7 +206,7 @@ struct obd_export {
        cfs_hash_t               *exp_flock_hash;
         cfs_list_t                exp_outstanding_replies;
         cfs_list_t                exp_uncommitted_replies;
-        cfs_spinlock_t            exp_uncommitted_replies_lock;
+       spinlock_t                exp_uncommitted_replies_lock;
         /** Last committed transno for this export */
         __u64                     exp_last_committed;
         /** When was last request received */
@@ -217,7 +217,7 @@ struct obd_export {
         * protects exp_flags, exp_outstanding_replies and the change
         * of exp_imp_reverse
         */
-        cfs_spinlock_t            exp_lock;
+       spinlock_t                exp_lock;
         /** Compatibility flags for this export */
         __u64                     exp_connect_flags;
         enum obd_option           exp_flags;
@@ -248,12 +248,12 @@ struct obd_export {
         cfs_time_t                exp_flvr_expire[2];   /* seconds */
 
         /** protects exp_hp_rpcs */
-        cfs_spinlock_t            exp_rpc_lock;
-        cfs_list_t                exp_hp_rpcs;  /* (potential) HP RPCs */
+       spinlock_t                exp_rpc_lock;
+       cfs_list_t                exp_hp_rpcs;  /* (potential) HP RPCs */
 
         /** blocking dlm lock list, protected by exp_bl_list_lock */
         cfs_list_t                exp_bl_list;
-        cfs_spinlock_t            exp_bl_list_lock;
+       spinlock_t                exp_bl_list_lock;
 
         /** Target specific data */
         union {
index 0edf9f2..44418c5 100644
@@ -275,7 +275,7 @@ struct lu_server_seq;
 struct lu_client_seq {
         /* Sequence-controller export. */
         struct obd_export      *lcs_exp;
-        cfs_mutex_t             lcs_mutex;
+       struct mutex            lcs_mutex;
 
         /*
          * Range of sequences allowed for allocation. When using lu_client_seq on
@@ -341,7 +341,7 @@ struct lu_server_seq {
         struct lu_client_seq   *lss_cli;
 
         /* Mutex for protecting allocation */
-        cfs_mutex_t             lss_mutex;
+       struct mutex            lss_mutex;
 
         /*
          * Service uuid, passed from MDT + seq name to form unique seq name to
index 836ecfd..7a26a9e 100644
@@ -92,7 +92,7 @@ struct lu_server_fld {
 
         /**
          * Protect index modifications */
-        cfs_mutex_t              lsf_lock;
+       struct mutex            lsf_lock;
 
         /**
          * Fld service name in form "fld-srv-lustre-MDTXXX" */
@@ -123,7 +123,7 @@ struct lu_client_fld {
 
         /**
          * Lock protecting exports list and fld_hash. */
-        cfs_spinlock_t           lcf_lock;
+       spinlock_t               lcf_lock;
 
         /**
          * Client FLD cache. */
index 75854be..55b681f 100644
@@ -81,7 +81,7 @@ struct portals_handle {
 
        /* newly added fields to handle the RCU issue. -jxiong */
        cfs_rcu_head_t                  h_rcu;
-       cfs_spinlock_t                  h_lock;
+       spinlock_t                      h_lock;
        unsigned int                    h_size:31;
        unsigned int                    h_in:1;
 };
index b2a1dc0..27f50b5 100644
@@ -72,8 +72,8 @@ enum lustre_idmap_idx {
 };
 
 struct lustre_idmap_table {
-        cfs_spinlock_t   lit_lock;
-        cfs_list_t       lit_idmaps[CFS_IDMAP_N_HASHES][CFS_IDMAP_HASHSIZE];
+       spinlock_t      lit_lock;
+       cfs_list_t      lit_idmaps[CFS_IDMAP_N_HASHES][CFS_IDMAP_HASHSIZE];
 };
 
 extern void lustre_groups_from_list(cfs_group_info_t *ginfo, gid_t *glist);
index 9338a97..a0fa550 100644
 #define AT_FLG_NOHIST 0x1          /* use last reported value only */
 
 struct adaptive_timeout {
-        time_t           at_binstart;         /* bin start time */
-        unsigned int     at_hist[AT_BINS];    /* timeout history bins */
-        unsigned int     at_flags;
-        unsigned int     at_current;          /* current timeout value */
-        unsigned int     at_worst_ever;       /* worst-ever timeout value */
-        time_t           at_worst_time;       /* worst-ever timeout timestamp */
-        cfs_spinlock_t   at_lock;
+       time_t          at_binstart;         /* bin start time */
+       unsigned int    at_hist[AT_BINS];    /* timeout history bins */
+       unsigned int    at_flags;
+       unsigned int    at_current;          /* current timeout value */
+       unsigned int    at_worst_ever;       /* worst-ever timeout value */
+       time_t          at_worst_time;       /* worst-ever timeout timestamp */
+       spinlock_t      at_lock;
 };
 
 struct ptlrpc_at_array {
@@ -188,7 +188,7 @@ struct obd_import {
          * @{
          */
         struct ptlrpc_sec        *imp_sec;
-        cfs_mutex_t               imp_sec_mutex;
+       struct mutex              imp_sec_mutex;
         cfs_time_t                imp_sec_expire;
         /** @} */
 
@@ -248,7 +248,7 @@ struct obd_import {
         struct obd_import_conn   *imp_conn_current;
 
         /** Protects flags, level, generation, conn_cnt, *_list */
-        cfs_spinlock_t            imp_lock;
+       spinlock_t                imp_lock;
 
         /* flags */
         unsigned long             imp_no_timeout:1,       /* timeouts are disabled */
@@ -323,7 +323,7 @@ static inline void at_reset(struct adaptive_timeout *at, int val) {
 }
 static inline void at_init(struct adaptive_timeout *at, int val, int flags) {
        memset(at, 0, sizeof(*at));
-       cfs_spin_lock_init(&at->at_lock);
+       spin_lock_init(&at->at_lock);
        at->at_flags = flags;
        at_reset(at, val);
 }
index c28f5b3..88ba317 100644
@@ -118,10 +118,10 @@ void statfs_unpack(cfs_kstatfs_t *sfs, struct obd_statfs *osfs);
 
 /* l_lock.c */
 struct lustre_lock {
-        int l_depth;
-        cfs_task_t *l_owner;
-        cfs_semaphore_t l_sem;
-        cfs_spinlock_t l_spin;
+       int                     l_depth;
+       cfs_task_t              *l_owner;
+       struct semaphore        l_sem;
+       spinlock_t              l_spin;
 };
 
 void l_lock_init(struct lustre_lock *);
index d03daab..ec903be 100644
@@ -123,7 +123,7 @@ struct lustre_client_ocd {
          * under ->lco_lock.
          */
         __u64              lco_flags;
-        cfs_mutex_t        lco_lock;
+       struct mutex       lco_lock;
         struct obd_export *lco_md_exp;
         struct obd_export *lco_dt_exp;
 };
index b62255f..24a7c47 100644
@@ -333,8 +333,8 @@ struct llog_operations {
 
 /* In-memory descriptor for a log object or log catalog */
 struct llog_handle {
-       cfs_rw_semaphore_t       lgh_lock;
-       cfs_spinlock_t           lgh_hdr_lock; /* protect lgh_hdr data */
+       struct rw_semaphore      lgh_lock;
+       spinlock_t               lgh_hdr_lock; /* protect lgh_hdr data */
        struct llog_logid        lgh_id; /* id of this log */
        struct llog_log_hdr     *lgh_hdr;
        struct file             *lgh_file;
@@ -383,7 +383,7 @@ struct llog_ctxt {
         struct llog_handle      *loc_handle;
         struct llog_commit_master *loc_lcm;
         struct llog_canceld_ctxt *loc_llcd;
-        cfs_mutex_t              loc_mutex; /* protects loc_llcd and loc_imp */
+       struct mutex             loc_mutex; /* protect loc_llcd and loc_imp */
         cfs_atomic_t             loc_refcount;
         void                    *llog_proc_cb;
         long                     loc_flags; /* flags, see above defines */
@@ -412,7 +412,7 @@ struct llog_commit_master {
         /**
          * Lock protecting list of llcds.
          */
-        cfs_spinlock_t             lcm_lock;
+       spinlock_t                 lcm_lock;
         /**
          * Llcds in flight for debugging purposes.
          */
@@ -551,64 +551,63 @@ static inline void llog_ctxt_put(struct llog_ctxt *ctxt)
 
 static inline void llog_group_init(struct obd_llog_group *olg, int group)
 {
-        cfs_waitq_init(&olg->olg_waitq);
-        cfs_spin_lock_init(&olg->olg_lock);
-        cfs_mutex_init(&olg->olg_cat_processing);
-        olg->olg_seq = group;
+       cfs_waitq_init(&olg->olg_waitq);
+       spin_lock_init(&olg->olg_lock);
+       mutex_init(&olg->olg_cat_processing);
+       olg->olg_seq = group;
 }
 
 static inline void llog_group_set_export(struct obd_llog_group *olg,
                                          struct obd_export *exp)
 {
-        LASSERT(exp != NULL);
-
-        cfs_spin_lock(&olg->olg_lock);
-        if (olg->olg_exp != NULL && olg->olg_exp != exp)
-                CWARN("%s: export for group %d is changed: 0x%p -> 0x%p\n",
-                      exp->exp_obd->obd_name, olg->olg_seq,
-                      olg->olg_exp, exp);
-        olg->olg_exp = exp;
-        cfs_spin_unlock(&olg->olg_lock);
+       LASSERT(exp != NULL);
+
+       spin_lock(&olg->olg_lock);
+       if (olg->olg_exp != NULL && olg->olg_exp != exp)
+               CWARN("%s: export for group %d is changed: 0x%p -> 0x%p\n",
+                     exp->exp_obd->obd_name, olg->olg_seq,
+                     olg->olg_exp, exp);
+       olg->olg_exp = exp;
+       spin_unlock(&olg->olg_lock);
 }
 
 static inline int llog_group_set_ctxt(struct obd_llog_group *olg,
                                       struct llog_ctxt *ctxt, int index)
 {
-        LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
-
-        cfs_spin_lock(&olg->olg_lock);
-        if (olg->olg_ctxts[index] != NULL) {
-                cfs_spin_unlock(&olg->olg_lock);
-                return -EEXIST;
-        }
-        olg->olg_ctxts[index] = ctxt;
-        cfs_spin_unlock(&olg->olg_lock);
-        return 0;
+       LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
+
+       spin_lock(&olg->olg_lock);
+       if (olg->olg_ctxts[index] != NULL) {
+               spin_unlock(&olg->olg_lock);
+               return -EEXIST;
+       }
+       olg->olg_ctxts[index] = ctxt;
+       spin_unlock(&olg->olg_lock);
+       return 0;
 }
 
 static inline struct llog_ctxt *llog_group_get_ctxt(struct obd_llog_group *olg,
                                                     int index)
 {
-        struct llog_ctxt *ctxt;
+       struct llog_ctxt *ctxt;
 
-        LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
+       LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
 
-        cfs_spin_lock(&olg->olg_lock);
-        if (olg->olg_ctxts[index] == NULL) {
-                ctxt = NULL;
-        } else {
-                ctxt = llog_ctxt_get(olg->olg_ctxts[index]);
-        }
-        cfs_spin_unlock(&olg->olg_lock);
-        return ctxt;
+       spin_lock(&olg->olg_lock);
+       if (olg->olg_ctxts[index] == NULL)
+               ctxt = NULL;
+       else
+               ctxt = llog_ctxt_get(olg->olg_ctxts[index]);
+       spin_unlock(&olg->olg_lock);
+       return ctxt;
 }
 
 static inline void llog_group_clear_ctxt(struct obd_llog_group *olg, int index)
 {
        LASSERT(index >= 0 && index < LLOG_MAX_CTXTS);
-       cfs_spin_lock(&olg->olg_lock);
+       spin_lock(&olg->olg_lock);
        olg->olg_ctxts[index] = NULL;
-       cfs_spin_unlock(&olg->olg_lock);
+       spin_unlock(&olg->olg_lock);
 }
 
 static inline struct llog_ctxt *llog_get_context(struct obd_device *obd,
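
llog_group_set_ctxt() above is a set-once-under-spinlock pattern: the lock makes the NULL test and the store a single atomic step, so two racing registrations cannot both succeed. The same shape in isolation, with illustrative names:

        #include <linux/spinlock.h>
        #include <linux/errno.h>

        static int demo_slot_set(spinlock_t *lock, void **slot, void *val)
        {
                spin_lock(lock);
                if (*slot != NULL) {
                        spin_unlock(lock);
                        return -EEXIST; /* lost the race */
                }
                *slot = val;
                spin_unlock(lock);
                return 0;
        }
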
index e302dcd..27441f9 100644
@@ -70,7 +70,7 @@ struct ptlrpc_request;
 struct obd_device;
 
 struct mdc_rpc_lock {
-       cfs_mutex_t             rpcl_mutex;
+       struct mutex            rpcl_mutex;
        struct lookup_intent    *rpcl_it;
        int                     rpcl_fakes;
 };
@@ -79,7 +79,7 @@ struct mdc_rpc_lock {
 
 static inline void mdc_init_rpc_lock(struct mdc_rpc_lock *lck)
 {
-        cfs_mutex_init(&lck->rpcl_mutex);
+       mutex_init(&lck->rpcl_mutex);
         lck->rpcl_it = NULL;
 }
 
@@ -98,12 +98,12 @@ static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck,
         * Only when all fake requests are finished can normal requests
         * be sent, to ensure they are recoverable again. */
  again:
-       cfs_mutex_lock(&lck->rpcl_mutex);
+       mutex_lock(&lck->rpcl_mutex);
 
        if (CFS_FAIL_CHECK_QUIET(OBD_FAIL_MDC_RPCS_SEM)) {
                lck->rpcl_it = MDC_FAKE_RPCL_IT;
                lck->rpcl_fakes++;
-               cfs_mutex_unlock(&lck->rpcl_mutex);
+               mutex_unlock(&lck->rpcl_mutex);
                return;
        }
 
@@ -113,7 +113,7 @@ static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck,
         * in this extremely rare case, just have low overhead in
         * the common case when it isn't true. */
        while (unlikely(lck->rpcl_it == MDC_FAKE_RPCL_IT)) {
-               cfs_mutex_unlock(&lck->rpcl_mutex);
+               mutex_unlock(&lck->rpcl_mutex);
                cfs_schedule_timeout(cfs_time_seconds(1) / 4);
                goto again;
        }
@@ -129,7 +129,7 @@ static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck,
                goto out;
 
        if (lck->rpcl_it == MDC_FAKE_RPCL_IT) { /* OBD_FAIL_MDC_RPCS_SEM */
-               cfs_mutex_lock(&lck->rpcl_mutex);
+               mutex_lock(&lck->rpcl_mutex);
 
                LASSERTF(lck->rpcl_fakes > 0, "%d\n", lck->rpcl_fakes);
                lck->rpcl_fakes--;
@@ -142,7 +142,7 @@ static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck,
                lck->rpcl_it = NULL;
        }
 
-       cfs_mutex_unlock(&lck->rpcl_mutex);
+       mutex_unlock(&lck->rpcl_mutex);
  out:
        EXIT;
 }
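
mdc_get_rpc_lock() above drops rpcl_mutex before sleeping and retries from the top: sleeping while holding a mutex that the fake-request holders need to release would deadlock. A stripped-down sketch of that unlock-sleep-retry loop, where msleep() stands in for the quarter-second cfs_schedule_timeout() backoff and the names are illustrative:

        #include <linux/mutex.h>
        #include <linux/delay.h>

        static void demo_get_lock(struct mutex *m, const int *busy)
        {
        again:
                mutex_lock(m);
                if (*busy) {
                        mutex_unlock(m);        /* never sleep while holding m */
                        msleep(250);
                        goto again;
                }
                /* ... caller proceeds holding m ... */
        }
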
index 0c00e64..25ee3e7 100644
@@ -473,7 +473,7 @@ struct ptlrpc_request_set {
         * locked so that any old caller can communicate requests to
         * the set holder who can then fold them into the lock-free set
         */
-       cfs_spinlock_t        set_new_req_lock;
+       spinlock_t              set_new_req_lock;
        /** List of new yet unsent requests. Only used with ptlrpcd now. */
        cfs_list_t            set_new_requests;
 
@@ -535,7 +535,7 @@ struct ptlrpc_reply_state {
         cfs_list_t             rs_debug_list;
 #endif
         /** A spinlock to protect the reply state flags */
-        cfs_spinlock_t         rs_lock;
+       spinlock_t              rs_lock;
         /** Reply state flags */
         unsigned long          rs_difficult:1;     /* ACK/commit stuff */
         unsigned long          rs_no_ack:1;    /* no ACK, even for
@@ -610,8 +610,8 @@ typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
  * any allocations (to avoid e.g. OOM).
  */
 struct ptlrpc_request_pool {
-        /** Locks the list */
-        cfs_spinlock_t prp_lock;
+       /** Locks the list */
+       spinlock_t prp_lock;
         /** list of ptlrpc_request structs */
         cfs_list_t prp_req_list;
          /** Maximum message size that would fit into a request from this pool */
@@ -691,8 +691,8 @@ struct ptlrpc_request {
         /** Lock to protect request flags and some other important bits, like
          * rq_list
          */
-        cfs_spinlock_t rq_lock;
-        /** client-side flags are serialized by rq_lock */
+       spinlock_t rq_lock;
+       /** client-side flags are serialized by rq_lock */
        unsigned int rq_intr:1, rq_replied:1, rq_err:1,
                 rq_timedout:1, rq_resend:1, rq_restart:1,
                 /**
@@ -1109,7 +1109,7 @@ struct ptlrpc_bulk_desc {
         /** client side */
         unsigned long bd_registered:1;
         /** For serialization with callback */
-        cfs_spinlock_t bd_lock;
+       spinlock_t bd_lock;
         /** Import generation when request for this bulk was sent */
         int bd_import_generation;
         /** Server side - export this bulk created for */
@@ -1322,7 +1322,7 @@ struct ptlrpc_service_ops {
  */
 struct ptlrpc_service {
        /** serialize /proc operations */
-       cfs_spinlock_t                  srv_lock;
+       spinlock_t                      srv_lock;
         /** most often accessed fields */
         /** chain thru all services */
         cfs_list_t                      srv_list;
@@ -1421,7 +1421,7 @@ struct ptlrpc_service_part {
         * rqbd list and incoming requests waiting for preprocess,
         * threads starting & stopping are also protected by this lock.
         */
-       cfs_spinlock_t                  scp_lock  __cfs_cacheline_aligned;
+       spinlock_t                      scp_lock  __cfs_cacheline_aligned;
        /** total # req buffer descs allocated */
        int                             scp_nrqbds_total;
        /** # posted request buffers for receiving */
@@ -1457,7 +1457,7 @@ struct ptlrpc_service_part {
         * serialize the following fields, used for processing requests
         * sent to this portal
         */
-       cfs_spinlock_t                  scp_req_lock __cfs_cacheline_aligned;
+       spinlock_t                      scp_req_lock __cfs_cacheline_aligned;
        /** # reqs in either of the queues below */
        /** reqs waiting for service */
        cfs_list_t                      scp_req_pending;
@@ -1476,7 +1476,7 @@ struct ptlrpc_service_part {
         * serialize the following fields, used for changes on
         * adaptive timeout
         */
-       cfs_spinlock_t                  scp_at_lock __cfs_cacheline_aligned;
+       spinlock_t                      scp_at_lock __cfs_cacheline_aligned;
        /** estimated rpc service time */
        struct adaptive_timeout         scp_at_estimate;
        /** reqs waiting for replies */
@@ -1493,7 +1493,7 @@ struct ptlrpc_service_part {
         * serialize the following fields, used for processing
         * replies for this portal
         */
-       cfs_spinlock_t                  scp_rep_lock __cfs_cacheline_aligned;
+       spinlock_t                      scp_rep_lock __cfs_cacheline_aligned;
        /** all the active replies */
        cfs_list_t                      scp_rep_active;
 #ifndef __KERNEL__
@@ -1518,22 +1518,22 @@ struct ptlrpc_service_part {
  * Declaration of ptlrpcd control structure
  */
 struct ptlrpcd_ctl {
-        /**
-         * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
-         */
-        unsigned long               pc_flags;
-        /**
-         * Thread lock protecting structure fields.
-         */
-        cfs_spinlock_t              pc_lock;
-        /**
-         * Start completion.
-         */
-        cfs_completion_t            pc_starting;
-        /**
-         * Stop completion.
-         */
-        cfs_completion_t            pc_finishing;
+       /**
+        * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
+        */
+       unsigned long                   pc_flags;
+       /**
+        * Thread lock protecting structure fields.
+        */
+       spinlock_t                      pc_lock;
+       /**
+        * Start completion.
+        */
+       struct completion               pc_starting;
+       /**
+        * Stop completion.
+        */
+       struct completion               pc_finishing;
         /**
          * Thread requests set.
          */
@@ -1651,14 +1651,14 @@ void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc);
 
 static inline int ptlrpc_server_bulk_active(struct ptlrpc_bulk_desc *desc)
 {
-        int rc;
+       int rc;
 
-        LASSERT(desc != NULL);
+       LASSERT(desc != NULL);
 
-        cfs_spin_lock(&desc->bd_lock);
-        rc = desc->bd_network_rw;
-        cfs_spin_unlock(&desc->bd_lock);
-        return rc;
+       spin_lock(&desc->bd_lock);
+       rc = desc->bd_network_rw;
+       spin_unlock(&desc->bd_lock);
+       return rc;
 }
 #endif
 
@@ -1679,10 +1679,10 @@ static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
         if (!desc)
                 return 0;
 
-        cfs_spin_lock(&desc->bd_lock);
-        rc = desc->bd_network_rw;
-        cfs_spin_unlock(&desc->bd_lock);
-        return rc;
+       spin_lock(&desc->bd_lock);
+       rc = desc->bd_network_rw;
+       spin_unlock(&desc->bd_lock);
+       return rc;
 }
 
 #define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01
@@ -2080,17 +2080,17 @@ ptlrpc_client_recv(struct ptlrpc_request *req)
 static inline int
 ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
 {
-        int rc;
-
-        cfs_spin_lock(&req->rq_lock);
-        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
-            req->rq_reply_deadline > cfs_time_current_sec()) {
-                cfs_spin_unlock(&req->rq_lock);
-                return 1;
-        }
-        rc = req->rq_receiving_reply || req->rq_must_unlink;
-        cfs_spin_unlock(&req->rq_lock);
-        return rc;
+       int rc;
+
+       spin_lock(&req->rq_lock);
+       if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
+           req->rq_reply_deadline > cfs_time_current_sec()) {
+               spin_unlock(&req->rq_lock);
+               return 1;
+       }
+       rc = req->rq_receiving_reply || req->rq_must_unlink;
+       spin_unlock(&req->rq_lock);
+       return rc;
 }
 
 static inline void
@@ -2157,12 +2157,12 @@ static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req)
 
 static inline int ptlrpc_no_resend(struct ptlrpc_request *req)
 {
-        if (!req->rq_no_resend && ptlrpc_send_limit_expired(req)) {
-                cfs_spin_lock(&req->rq_lock);
-                req->rq_no_resend = 1;
-                cfs_spin_unlock(&req->rq_lock);
-        }
-        return req->rq_no_resend;
+       if (!req->rq_no_resend && ptlrpc_send_limit_expired(req)) {
+               spin_lock(&req->rq_lock);
+               req->rq_no_resend = 1;
+               spin_unlock(&req->rq_lock);
+       }
+       return req->rq_no_resend;
 }
 
 static inline int
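
pc_starting and pc_finishing above become plain struct completion objects. The usual handshake is that the spawner blocks on the first until the new thread signals it is up, and on the second at shutdown. A sketch under illustrative names, not the actual ptlrpcd code:

        #include <linux/completion.h>
        #include <linux/kthread.h>
        #include <linux/err.h>

        struct demo_ctl {
                struct completion       dc_starting;
                struct completion       dc_finishing;
        };

        static int demo_thread(void *arg)
        {
                struct demo_ctl *pc = arg;

                complete(&pc->dc_starting);     /* unblock the spawner */
                /* ... service loop ... */
                complete(&pc->dc_finishing);
                return 0;
        }

        static int demo_start(struct demo_ctl *pc)
        {
                struct task_struct *t;

                init_completion(&pc->dc_starting);
                init_completion(&pc->dc_finishing);
                t = kthread_run(demo_thread, pc, "demo_thread");
                if (IS_ERR(t))
                        return PTR_ERR(t);
                wait_for_completion(&pc->dc_starting); /* thread is now running */
                return 0;
        }
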
index 6c4207c..1d4e7c6 100644
@@ -512,7 +512,7 @@ struct ptlrpc_cli_ctx {
         unsigned int            cc_early_expire:1;
         unsigned long           cc_flags;
         struct vfs_cred         cc_vcred;
-        cfs_spinlock_t          cc_lock;
+       spinlock_t              cc_lock;
         cfs_list_t              cc_req_list;   /* waiting reqs linked here */
         cfs_list_t              cc_gc_chain;   /* linked to gc chain */
 };
@@ -827,7 +827,7 @@ struct ptlrpc_sec {
         unsigned int                    ps_dying:1;
         /** owning import */
         struct obd_import              *ps_import;
-        cfs_spinlock_t                  ps_lock;
+       spinlock_t                      ps_lock;
 
         /*
          * garbage collection
index 376522f..81d655f 100644
@@ -390,7 +390,7 @@ enum md_upcall_event {
 struct md_upcall {
          /** this lock protects upcall usage against its removal;
           * read lock is for using the upcall, write for init/fini */
-        cfs_rw_semaphore_t      mu_upcall_sem;
+       struct rw_semaphore     mu_upcall_sem;
         /** device to call, upper layer normally */
         struct md_device       *mu_upcall_dev;
         /** upcall function */
@@ -406,39 +406,39 @@ struct md_device {
 
 static inline void md_upcall_init(struct md_device *m, void *upcl)
 {
-        cfs_init_rwsem(&m->md_upcall.mu_upcall_sem);
-        m->md_upcall.mu_upcall_dev = NULL;
-        m->md_upcall.mu_upcall = upcl;
+       init_rwsem(&m->md_upcall.mu_upcall_sem);
+       m->md_upcall.mu_upcall_dev = NULL;
+       m->md_upcall.mu_upcall = upcl;
 }
 
 static inline void md_upcall_dev_set(struct md_device *m, struct md_device *up)
 {
-        cfs_down_write(&m->md_upcall.mu_upcall_sem);
-        m->md_upcall.mu_upcall_dev = up;
-        cfs_up_write(&m->md_upcall.mu_upcall_sem);
+       down_write(&m->md_upcall.mu_upcall_sem);
+       m->md_upcall.mu_upcall_dev = up;
+       up_write(&m->md_upcall.mu_upcall_sem);
 }
 
 static inline void md_upcall_fini(struct md_device *m)
 {
-        cfs_down_write(&m->md_upcall.mu_upcall_sem);
-        m->md_upcall.mu_upcall_dev = NULL;
-        m->md_upcall.mu_upcall = NULL;
-        cfs_up_write(&m->md_upcall.mu_upcall_sem);
+       down_write(&m->md_upcall.mu_upcall_sem);
+       m->md_upcall.mu_upcall_dev = NULL;
+       m->md_upcall.mu_upcall = NULL;
+       up_write(&m->md_upcall.mu_upcall_sem);
 }
 
 static inline int md_do_upcall(const struct lu_env *env, struct md_device *m,
-                               enum md_upcall_event ev, void *data)
-{
-        int rc = 0;
-        cfs_down_read(&m->md_upcall.mu_upcall_sem);
-        if (m->md_upcall.mu_upcall_dev != NULL &&
-            m->md_upcall.mu_upcall_dev->md_upcall.mu_upcall != NULL) {
-                rc = m->md_upcall.mu_upcall_dev->md_upcall.mu_upcall(env,
-                                              m->md_upcall.mu_upcall_dev,
-                                              ev, data);
-        }
-        cfs_up_read(&m->md_upcall.mu_upcall_sem);
-        return rc;
+                               enum md_upcall_event ev, void *data)
+{
+       int rc = 0;
+       down_read(&m->md_upcall.mu_upcall_sem);
+       if (m->md_upcall.mu_upcall_dev != NULL &&
+           m->md_upcall.mu_upcall_dev->md_upcall.mu_upcall != NULL) {
+               rc = m->md_upcall.mu_upcall_dev->md_upcall.mu_upcall(env,
+                                             m->md_upcall.mu_upcall_dev,
+                                             ev, data);
+       }
+       up_read(&m->md_upcall.mu_upcall_sem);
+       return rc;
 }
 
 struct md_object {
index 8745a39..6a2ab5f 100644
@@ -102,7 +102,7 @@ static inline void loi_init(struct lov_oinfo *loi)
 
 struct lov_stripe_md {
        cfs_atomic_t     lsm_refc;
-        cfs_spinlock_t   lsm_lock;
+       spinlock_t      lsm_lock;
         pid_t            lsm_lock_owner; /* debugging */
 
          /* maximum possible file size, might change as OST status changes,
@@ -244,7 +244,7 @@ struct obd_type {
         char *typ_name;
         int  typ_refcnt;
         struct lu_device_type *typ_lu;
-        cfs_spinlock_t obd_type_lock;
+       spinlock_t obd_type_lock;
 };
 
 struct brw_page {
@@ -274,7 +274,7 @@ struct obd_device_target {
         struct lu_target         *obt_lut;
 #endif
         __u64                     obt_mount_count;
-        cfs_rw_semaphore_t        obt_rwsem;
+       struct rw_semaphore       obt_rwsem;
         struct vfsmount          *obt_vfsmnt;
         struct file              *obt_health_check_filp;
        struct osd_properties     obt_osd_properties;
@@ -322,13 +322,13 @@ struct filter_obd {
         cfs_dentry_t        *fo_dentry_O;
         cfs_dentry_t       **fo_dentry_O_groups;
         struct filter_subdirs   *fo_dentry_O_sub;
-        cfs_mutex_t          fo_init_lock;      /* group initialization lock */
-        int                  fo_committed_group;
+       struct mutex            fo_init_lock;   /* group initialization lock*/
+       int                     fo_committed_group;
 
-        cfs_spinlock_t       fo_objidlock;      /* protect fo_lastobjid */
+       spinlock_t              fo_objidlock;   /* protect fo_lastobjid */
 
-        unsigned long        fo_destroys_in_progress;
-        cfs_mutex_t          fo_create_locks[FILTER_SUBDIR_COUNT];
+       unsigned long           fo_destroys_in_progress;
+       struct mutex            fo_create_locks[FILTER_SUBDIR_COUNT];
 
         cfs_list_t fo_export_list;
         int                  fo_subdir_count;
@@ -339,7 +339,7 @@ struct filter_obd {
         int                  fo_tot_granted_clients;
 
         obd_size             fo_readcache_max_filesize;
-        cfs_spinlock_t       fo_flags_lock;
+       spinlock_t              fo_flags_lock;
         unsigned int         fo_read_cache:1,   /**< enable read-only cache */
                              fo_writethrough_cache:1,/**< read cache writes */
                              fo_mds_ost_sync:1, /**< MDS-OST orphan recovery*/
@@ -352,7 +352,7 @@ struct filter_obd {
         __u64               *fo_last_objids; /* last created objid for groups,
                                               * protected by fo_objidlock */
 
-        cfs_mutex_t          fo_alloc_lock;
+       struct mutex            fo_alloc_lock;
 
         cfs_atomic_t         fo_r_in_flight;
         cfs_atomic_t         fo_w_in_flight;
@@ -370,8 +370,8 @@ struct filter_obd {
         */
        struct cfs_hash         *fo_iobuf_hash;
 
-        cfs_list_t               fo_llog_list;
-        cfs_spinlock_t           fo_llog_list_lock;
+       cfs_list_t              fo_llog_list;
+       spinlock_t              fo_llog_list_lock;
 
         struct brw_stats         fo_filter_stats;
 
@@ -382,7 +382,7 @@ struct filter_obd {
 
 
         /* sptlrpc stuff */
-        cfs_rwlock_t             fo_sptlrpc_lock;
+       rwlock_t                fo_sptlrpc_lock;
         struct sptlrpc_rule_set  fo_sptlrpc_rset;
 
         /* capability related */
@@ -423,7 +423,7 @@ enum {
 struct mdc_rpc_lock;
 struct obd_import;
 struct client_obd {
-        cfs_rw_semaphore_t       cl_sem;
+       struct rw_semaphore      cl_sem;
         struct obd_uuid          cl_target_uuid;
         struct obd_import       *cl_import; /* ptlrpc connection state */
         int                      cl_conn_count;
@@ -521,7 +521,7 @@ struct client_obd {
         struct mdc_rpc_lock     *cl_close_lock;
 
         /* mgc datastruct */
-        cfs_semaphore_t          cl_mgc_sem;
+       struct semaphore         cl_mgc_sem;
         struct vfsmount         *cl_mgc_vfsmnt;
         struct dentry           *cl_mgc_configs_dir;
         cfs_atomic_t             cl_mgc_refcount;
@@ -564,24 +564,24 @@ struct obd_id_info {
 /* */
 
 struct echo_obd {
-        struct obd_device_target eo_obt;
-        struct obdo              eo_oa;
-        cfs_spinlock_t           eo_lock;
-        __u64                    eo_lastino;
-        struct lustre_handle     eo_nl_lock;
-        cfs_atomic_t             eo_prep;
+       struct obd_device_target eo_obt;
+       struct obdo             eo_oa;
+       spinlock_t               eo_lock;
+       __u64                    eo_lastino;
+       struct lustre_handle    eo_nl_lock;
+       cfs_atomic_t            eo_prep;
 };
 
 struct ost_obd {
-        struct ptlrpc_service *ost_service;
-        struct ptlrpc_service *ost_create_service;
-        struct ptlrpc_service *ost_io_service;
-        cfs_mutex_t            ost_health_mutex;
+       struct ptlrpc_service   *ost_service;
+       struct ptlrpc_service   *ost_create_service;
+       struct ptlrpc_service   *ost_io_service;
+       struct mutex            ost_health_mutex;
 };
 
 struct echo_client_obd {
-        struct obd_export   *ec_exp;   /* the local connection to osc/lov */
-        cfs_spinlock_t       ec_lock;
+       struct obd_export       *ec_exp;   /* the local connection to osc/lov */
+       spinlock_t              ec_lock;
         cfs_list_t           ec_objects;
         cfs_list_t           ec_locks;
         int                  ec_nstripes;
@@ -613,7 +613,7 @@ struct ost_pool {
                                                    lov_obd->lov_tgts */
         unsigned int        op_count;      /* number of OSTs in the array */
         unsigned int        op_size;       /* allocated size of lp_array */
-        cfs_rw_semaphore_t  op_rw_sem;     /* to protect ost_pool use */
+       struct rw_semaphore op_rw_sem;     /* to protect ost_pool use */
 };
 
 /* Round-robin allocator data */
@@ -635,7 +635,7 @@ struct lov_statfs_data {
 /* Stripe placement optimization */
 struct lov_qos {
         cfs_list_t          lq_oss_list; /* list of OSSs that targets use */
-        cfs_rw_semaphore_t  lq_rw_sem;
+       struct rw_semaphore lq_rw_sem;
         __u32               lq_active_oss_count;
         unsigned int        lq_prio_free;   /* priority for free space */
         unsigned int        lq_threshold_rr;/* priority for rr */
@@ -688,7 +688,7 @@ struct lov_obd {
         struct lov_tgt_desc   **lov_tgts;              /* sparse array */
         struct ost_pool         lov_packed;            /* all OSTs in a packed
                                                           array */
-        cfs_mutex_t             lov_lock;
+       struct mutex            lov_lock;
         struct obd_connect_data lov_ocd;
         cfs_atomic_t            lov_refcount;
         __u32                   lov_tgt_count;         /* how many OBD's */
@@ -711,7 +711,7 @@ struct lmv_tgt_desc {
         struct obd_export      *ltd_exp;
         int                     ltd_active; /* is this target up for requests */
         int                     ltd_idx;
-        cfs_mutex_t             ltd_fid_mutex;
+       struct mutex            ltd_fid_mutex;
 };
 
 enum placement_policy {
@@ -724,9 +724,9 @@ enum placement_policy {
 typedef enum placement_policy placement_policy_t;
 
 struct lmv_obd {
-        int                     refcount;
-        struct lu_client_fld    lmv_fld;
-        cfs_spinlock_t          lmv_lock;
+       int                     refcount;
+       struct lu_client_fld    lmv_fld;
+       spinlock_t              lmv_lock;
         placement_policy_t      lmv_placement;
         struct lmv_desc         desc;
         struct obd_uuid         cluuid;
@@ -737,7 +737,7 @@ struct lmv_obd {
         int                     max_def_easize;
         int                     max_cookiesize;
         int                     server_timeout;
-        cfs_mutex_t             init_mutex;
+       struct mutex            init_mutex;
 
         struct lmv_tgt_desc     *tgts;
         int                     tgts_size;
@@ -952,10 +952,10 @@ struct obd_notify_upcall {
 };
 
 struct target_recovery_data {
-        svc_handler_t     trd_recovery_handler;
-        pid_t             trd_processing_task;
-        cfs_completion_t  trd_starting;
-        cfs_completion_t  trd_finishing;
+       svc_handler_t           trd_recovery_handler;
+       pid_t                   trd_processing_task;
+       struct completion       trd_starting;
+       struct completion       trd_finishing;
 };
 
 /**
@@ -994,10 +994,10 @@ struct obd_llog_group {
         int                olg_seq;
         struct llog_ctxt  *olg_ctxts[LLOG_MAX_CTXTS];
         cfs_waitq_t        olg_waitq;
-        cfs_spinlock_t     olg_lock;
-        struct obd_export *olg_exp;
-        int                olg_initializing;
-        cfs_mutex_t        olg_cat_processing;
+       spinlock_t         olg_lock;
+       struct obd_export *olg_exp;
+       int                olg_initializing;
+       struct mutex       olg_cat_processing;
 };
 
 /* corresponds to one of the obd's */
@@ -1050,21 +1050,21 @@ struct obd_device {
         cfs_list_t              obd_unlinked_exports;
         cfs_list_t              obd_delayed_exports;
         int                     obd_num_exports;
-        cfs_spinlock_t          obd_nid_lock;
-        struct ldlm_namespace  *obd_namespace;
-        struct ptlrpc_client    obd_ldlm_client; /* XXX OST/MDS only */
-        /* a spinlock is OK for what we do now, may need a semaphore later */
-        cfs_spinlock_t          obd_dev_lock; /* protects obd bitfield above */
-        cfs_mutex_t             obd_dev_mutex;
-        __u64                   obd_last_committed;
-        struct fsfilt_operations *obd_fsops;
-        cfs_spinlock_t          obd_osfs_lock;
-        struct obd_statfs       obd_osfs;       /* locked by obd_osfs_lock */
-        __u64                   obd_osfs_age;
-        struct lvfs_run_ctxt    obd_lvfs_ctxt;
-        struct obd_llog_group   obd_olg; /* default llog group */
-        struct obd_device      *obd_observer;
-        cfs_rw_semaphore_t      obd_observer_link_sem;
+       spinlock_t              obd_nid_lock;
+       struct ldlm_namespace  *obd_namespace;
+       struct ptlrpc_client    obd_ldlm_client; /* XXX OST/MDS only */
+       /* a spinlock is OK for what we do now, may need a semaphore later */
+       spinlock_t              obd_dev_lock; /* protect OBD bitfield above */
+       struct mutex            obd_dev_mutex;
+       __u64                   obd_last_committed;
+       struct fsfilt_operations *obd_fsops;
+       spinlock_t              obd_osfs_lock;
+       struct obd_statfs       obd_osfs;       /* locked by obd_osfs_lock */
+       __u64                   obd_osfs_age;
+       struct lvfs_run_ctxt    obd_lvfs_ctxt;
+       struct obd_llog_group   obd_olg;        /* default llog group */
+       struct obd_device       *obd_observer;
+       struct rw_semaphore     obd_observer_link_sem;
         struct obd_notify_upcall obd_upcall;
         struct obd_export       *obd_self_export;
         /* list of exports in LRU order, for ping evictor, with obd_dev_lock */
@@ -1077,7 +1077,7 @@ struct obd_device {
         int                              obd_delayed_clients;
         /* this lock protects all recovery list_heads, timer and
          * obd_next_recovery_transno value */
-        cfs_spinlock_t                   obd_recovery_task_lock;
+       spinlock_t                       obd_recovery_task_lock;
         __u64                            obd_next_recovery_transno;
         int                              obd_replayed_requests;
         int                              obd_requests_queued_for_recovery;
@@ -1129,7 +1129,7 @@ struct obd_device {
         /**
          * Ldlm pool part. Save last calculated SLV and Limit.
          */
-        cfs_rwlock_t           obd_pool_lock;
+       rwlock_t                obd_pool_lock;
         int                    obd_pool_limit;
         __u64                  obd_pool_slv;
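
The hunks above are purely mechanical: each libcfs typedef is replaced by the kernel primitive it wrapped (cfs_spinlock_t -> spinlock_t, cfs_mutex_t -> struct mutex, cfs_rwlock_t -> rwlock_t, cfs_rw_semaphore_t -> struct rw_semaphore, cfs_completion_t -> struct completion), with the fields reflowed to kernel tab indentation. A minimal sketch of the post-patch style, using hypothetical struct and field names rather than anything from the patch:

    #include <linux/spinlock.h>
    #include <linux/mutex.h>
    #include <linux/rwsem.h>
    #include <linux/completion.h>

    struct demo_dev {
            spinlock_t              d_lock;    /* short, non-sleeping critical sections */
            struct mutex            d_mutex;   /* may sleep while held */
            rwlock_t                d_rwlock;  /* many readers, one writer */
            struct rw_semaphore     d_sem;     /* sleeping reader/writer lock */
            struct completion       d_ready;   /* one-shot event */
    };

    static void demo_dev_init(struct demo_dev *d)
    {
            /* the kernel API is called directly; no cfs_ init wrappers remain */
            spin_lock_init(&d->d_lock);
            mutex_init(&d->d_mutex);
            rwlock_init(&d->d_rwlock);
            init_rwsem(&d->d_sem);
            init_completion(&d->d_ready);
    }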
 
index 2bcbd90..50abf03 100644 (file)
@@ -73,7 +73,7 @@
 
 /* OBD Device Declarations */
 extern struct obd_device *obd_devs[MAX_OBD_DEVICES];
-extern cfs_rwlock_t obd_dev_lock;
+extern rwlock_t obd_dev_lock;
 
 /* OBD Operations Declarations */
 extern struct obd_device *class_conn2obd(struct lustre_handle *);
@@ -196,7 +196,7 @@ struct config_llog_data {
         struct config_llog_data    *cld_sptlrpc;/* depended sptlrpc log */
         struct config_llog_data    *cld_recover;    /* imperative recover log */
         struct obd_export          *cld_mgcexp;
-        cfs_mutex_t                 cld_lock;
+       struct mutex                cld_lock;
         int                         cld_type;
         unsigned int                cld_stopping:1, /* we were told to stop
                                                      * watching */
@@ -662,7 +662,7 @@ static inline void obd_cleanup_client_import(struct obd_device *obd)
 
         /* If we set up but never connected, the
            client import will not have been cleaned. */
-        cfs_down_write(&obd->u.cli.cl_sem);
+       down_write(&obd->u.cli.cl_sem);
         if (obd->u.cli.cl_import) {
                 struct obd_import *imp;
                 imp = obd->u.cli.cl_import;
@@ -676,7 +676,7 @@ static inline void obd_cleanup_client_import(struct obd_device *obd)
                 client_destroy_import(imp);
                 obd->u.cli.cl_import = NULL;
         }
-        cfs_up_write(&obd->u.cli.cl_sem);
+       up_write(&obd->u.cli.cl_sem);
 
         EXIT;
 }
@@ -1251,9 +1251,9 @@ static inline int obd_statfs_async(struct obd_export *exp,
                        obd->obd_name, &obd->obd_osfs,
                        obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
                        obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
-                cfs_spin_lock(&obd->obd_osfs_lock);
-                memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
-                cfs_spin_unlock(&obd->obd_osfs_lock);
+               spin_lock(&obd->obd_osfs_lock);
+               memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
+               spin_unlock(&obd->obd_osfs_lock);
                 oinfo->oi_flags |= OBD_STATFS_FROM_CACHE;
                 if (oinfo->oi_cb_up)
                         oinfo->oi_cb_up(oinfo, 0);
@@ -1305,22 +1305,22 @@ static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp,
         if (cfs_time_before_64(obd->obd_osfs_age, max_age)) {
                 rc = OBP(obd, statfs)(env, exp, osfs, max_age, flags);
                 if (rc == 0) {
-                        cfs_spin_lock(&obd->obd_osfs_lock);
-                        memcpy(&obd->obd_osfs, osfs, sizeof(obd->obd_osfs));
-                        obd->obd_osfs_age = cfs_time_current_64();
-                        cfs_spin_unlock(&obd->obd_osfs_lock);
-                }
-        } else {
-                CDEBUG(D_SUPER,"%s: use %p cache blocks "LPU64"/"LPU64
-                       " objects "LPU64"/"LPU64"\n",
-                       obd->obd_name, &obd->obd_osfs,
-                       obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
-                       obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
-                cfs_spin_lock(&obd->obd_osfs_lock);
-                memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
-                cfs_spin_unlock(&obd->obd_osfs_lock);
-        }
-        RETURN(rc);
+                       spin_lock(&obd->obd_osfs_lock);
+                       memcpy(&obd->obd_osfs, osfs, sizeof(obd->obd_osfs));
+                       obd->obd_osfs_age = cfs_time_current_64();
+                       spin_unlock(&obd->obd_osfs_lock);
+               }
+       } else {
+               CDEBUG(D_SUPER, "%s: use %p cache blocks "LPU64"/"LPU64
+                      " objects "LPU64"/"LPU64"\n",
+                      obd->obd_name, &obd->obd_osfs,
+                      obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
+                      obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
+               spin_lock(&obd->obd_osfs_lock);
+               memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
+               spin_unlock(&obd->obd_osfs_lock);
+       }
+       RETURN(rc);
 }
 
 static inline int obd_sync_rqset(struct obd_export *exp, struct obd_info *oinfo,
@@ -1757,13 +1757,13 @@ static inline int obd_register_observer(struct obd_device *obd,
 {
         ENTRY;
         OBD_CHECK_DEV(obd);
-        cfs_down_write(&obd->obd_observer_link_sem);
+       down_write(&obd->obd_observer_link_sem);
         if (obd->obd_observer && observer) {
-                cfs_up_write(&obd->obd_observer_link_sem);
+               up_write(&obd->obd_observer_link_sem);
                 RETURN(-EALREADY);
         }
         obd->obd_observer = observer;
-        cfs_up_write(&obd->obd_observer_link_sem);
+       up_write(&obd->obd_observer_link_sem);
         RETURN(0);
 }
 
@@ -1771,10 +1771,10 @@ static inline int obd_pin_observer(struct obd_device *obd,
                                    struct obd_device **observer)
 {
         ENTRY;
-        cfs_down_read(&obd->obd_observer_link_sem);
+       down_read(&obd->obd_observer_link_sem);
         if (!obd->obd_observer) {
                 *observer = NULL;
-                cfs_up_read(&obd->obd_observer_link_sem);
+               up_read(&obd->obd_observer_link_sem);
                 RETURN(-ENOENT);
         }
         *observer = obd->obd_observer;
@@ -1784,7 +1784,7 @@ static inline int obd_pin_observer(struct obd_device *obd,
 static inline int obd_unpin_observer(struct obd_device *obd)
 {
         ENTRY;
-        cfs_up_read(&obd->obd_observer_link_sem);
+       up_read(&obd->obd_observer_link_sem);
         RETURN(0);
 }
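
The observer hunks above convert the read/write semaphore calls, including the pin/unpin idiom in which down_read() taken in one function is released only by its counterpart, keeping the observer pointer stable in between. A sketch of that idiom with a hypothetical object (not the patched code itself):

    #include <linux/rwsem.h>
    #include <linux/errno.h>

    static DECLARE_RWSEM(obs_sem);
    static void *observer;                  /* protected by obs_sem */

    /* writer: install an observer unless one is already set */
    static int obs_register(void *obs)
    {
            down_write(&obs_sem);
            if (observer && obs) {
                    up_write(&obs_sem);
                    return -EALREADY;
            }
            observer = obs;
            up_write(&obs_sem);
            return 0;
    }

    /* reader: on success, returns with obs_sem still read-held ... */
    static int obs_pin(void **out)
    {
            down_read(&obs_sem);
            if (!observer) {
                    up_read(&obs_sem);
                    return -ENOENT;
            }
            *out = observer;
            return 0;                       /* caller must call obs_unpin() */
    }

    /* ... and unpin drops it, ending the pointer's guaranteed lifetime */
    static void obs_unpin(void)
    {
            up_read(&obs_sem);
    }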
 
index 9f97e75..0eaf365 100644 (file)
@@ -289,7 +289,7 @@ static struct lu_env *ccc_inode_fini_env = NULL;
  * A mutex serializing calls to slp_inode_fini() under extreme memory
  * pressure, when environments cannot be allocated.
  */
-static CFS_DEFINE_MUTEX(ccc_inode_fini_guard);
+static DEFINE_MUTEX(ccc_inode_fini_guard);
 static int dummy_refcheck;
 
 int ccc_global_init(struct lu_device_type *device_type)
@@ -1262,7 +1262,7 @@ void cl_inode_fini(struct inode *inode)
                 env = cl_env_get(&refcheck);
                 emergency = IS_ERR(env);
                 if (emergency) {
-                        cfs_mutex_lock(&ccc_inode_fini_guard);
+                       mutex_lock(&ccc_inode_fini_guard);
                         LASSERT(ccc_inode_fini_env != NULL);
                         cl_env_implant(ccc_inode_fini_env, &refcheck);
                         env = ccc_inode_fini_env;
@@ -1278,7 +1278,7 @@ void cl_inode_fini(struct inode *inode)
                 lli->lli_clob = NULL;
                 if (emergency) {
                         cl_env_unplant(ccc_inode_fini_env, &refcheck);
-                        cfs_mutex_unlock(&ccc_inode_fini_guard);
+                       mutex_unlock(&ccc_inode_fini_guard);
                 } else
                         cl_env_put(env, &refcheck);
                 cl_env_reexit(cookie);
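
This file's conversion swaps CFS_DEFINE_MUTEX for the kernel's DEFINE_MUTEX on the statically allocated ccc_inode_fini_guard, which serializes use of a single pre-allocated emergency environment when allocation fails. A sketch of that fallback pattern under hypothetical names (struct work_env is invented for illustration):

    #include <linux/mutex.h>
    #include <linux/types.h>

    struct work_env;                        /* opaque here; hypothetical */

    static DEFINE_MUTEX(emergency_guard);
    static struct work_env *emergency_env;  /* pre-allocated at module init */

    static void do_fini(struct work_env *env_or_null)
    {
            struct work_env *env = env_or_null;
            bool emergency = (env == NULL);

            if (emergency) {
                    /* allocation failed: fall back to the one shared env,
                     * serialized so only one user touches it at a time */
                    mutex_lock(&emergency_guard);
                    env = emergency_env;
            }

            /* ... use env ... */

            if (emergency)
                    mutex_unlock(&emergency_guard);
    }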
index 8f7ffc9..9a40ac0 100644 (file)
@@ -102,13 +102,13 @@ int cl_ocd_update(struct obd_device *host,
                 flags = cli->cl_import->imp_connect_data.ocd_connect_flags;
                 CDEBUG(D_SUPER, "Changing connect_flags: "LPX64" -> "LPX64"\n",
                        lco->lco_flags, flags);
-                cfs_mutex_lock(&lco->lco_lock);
+               mutex_lock(&lco->lco_lock);
                 lco->lco_flags &= flags;
                 /* for each osc event update ea size */
                 if (lco->lco_dt_exp)
                         cl_init_ea_size(lco->lco_md_exp, lco->lco_dt_exp);
 
-                cfs_mutex_unlock(&lco->lco_lock);
+               mutex_unlock(&lco->lco_lock);
                 result = 0;
         } else {
                 CERROR("unexpected notification from %s %s!\n",
index 124f834..a0013e1 100644 (file)
@@ -52,7 +52,7 @@ struct ldlm_resource * lock_res_and_lock(struct ldlm_lock *lock)
 {
        /* on server-side resource of lock doesn't change */
        if (!lock->l_ns_srv)
-               cfs_spin_lock(&lock->l_lock);
+               spin_lock(&lock->l_lock);
 
        lock_res(lock->l_resource);
 
@@ -68,6 +68,6 @@ void unlock_res_and_lock(struct ldlm_lock *lock)
 
        unlock_res(lock->l_resource);
        if (!lock->l_ns_srv)
-               cfs_spin_unlock(&lock->l_lock);
+               spin_unlock(&lock->l_lock);
 }
 EXPORT_SYMBOL(unlock_res_and_lock);
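
lock_res_and_lock()/unlock_res_and_lock() show the two-level locking this patch converts: a per-lock spinlock is taken first (only where the resource pointer can be repointed, i.e. not on the server side), then the resource lock, released in reverse order. A condensed sketch with hypothetical types standing in for the ldlm structures:

    #include <linux/spinlock.h>

    struct res {
            spinlock_t      r_lock;
    };

    struct lk {
            spinlock_t      l_lock;         /* guards l_res repointing */
            struct res      *l_res;
            int             l_srv;          /* resource fixed on server side */
    };

    static struct res *lock_both(struct lk *l)
    {
            if (!l->l_srv)
                    spin_lock(&l->l_lock);  /* pin l_res against repointing */
            spin_lock(&l->l_res->r_lock);
            return l->l_res;
    }

    static void unlock_both(struct lk *l)
    {
            spin_unlock(&l->l_res->r_lock); /* reverse order of acquisition */
            if (!l->l_srv)
                    spin_unlock(&l->l_lock);
    }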
index f69c80c..f61e300 100644 (file)
@@ -613,9 +613,9 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
                 imp = obd->u.cli.cl_import;
 
         if (NULL != imp) {
-                cfs_spin_lock(&imp->imp_lock);
-                fwd.fwd_generation = imp->imp_generation;
-                cfs_spin_unlock(&imp->imp_lock);
+               spin_lock(&imp->imp_lock);
+               fwd.fwd_generation = imp->imp_generation;
+               spin_unlock(&imp->imp_lock);
         }
 
         lwi = LWI_TIMEOUT_INTR(0, NULL, ldlm_flock_interrupted_wait, &fwd);
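
The flock hunk above is the common snapshot idiom after conversion: hold the import spinlock just long enough to copy one generation counter into a local. Sketch (hypothetical names):

    #include <linux/spinlock.h>

    struct imp {
            spinlock_t      i_lock;
            int             i_generation;
    };

    static int imp_snapshot_generation(struct imp *imp)
    {
            int gen;

            spin_lock(&imp->i_lock);
            gen = imp->i_generation;        /* consistent read of shared state */
            spin_unlock(&imp->i_lock);
            return gen;
    }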
index b2ca20d..f0a114a 100644 (file)
@@ -38,9 +38,9 @@
 
 extern cfs_atomic_t ldlm_srv_namespace_nr;
 extern cfs_atomic_t ldlm_cli_namespace_nr;
-extern cfs_mutex_t ldlm_srv_namespace_lock;
+extern struct mutex ldlm_srv_namespace_lock;
 extern cfs_list_t ldlm_srv_namespace_list;
-extern cfs_mutex_t ldlm_cli_namespace_lock;
+extern struct mutex ldlm_cli_namespace_lock;
 extern cfs_list_t ldlm_cli_namespace_list;
 
 static inline cfs_atomic_t *ldlm_namespace_nr(ldlm_side_t client)
@@ -55,7 +55,7 @@ static inline cfs_list_t *ldlm_namespace_list(ldlm_side_t client)
                 &ldlm_srv_namespace_list : &ldlm_cli_namespace_list;
 }
 
-static inline cfs_mutex_t *ldlm_namespace_lock(ldlm_side_t client)
+static inline struct mutex *ldlm_namespace_lock(ldlm_side_t client)
 {
         return client == LDLM_NAMESPACE_SERVER ?
                 &ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
@@ -227,9 +227,9 @@ typedef enum ldlm_policy_res ldlm_policy_res_t;
                 struct ldlm_pool *pl = data;                                \
                 type tmp;                                                   \
                                                                             \
-                cfs_spin_lock(&pl->pl_lock);                                \
-                tmp = pl->pl_##var;                                         \
-                cfs_spin_unlock(&pl->pl_lock);                              \
+               spin_lock(&pl->pl_lock);                                    \
+               tmp = pl->pl_##var;                                         \
+               spin_unlock(&pl->pl_lock);                                  \
                                                                             \
                 return lprocfs_rd_uint(page, start, off, count, eof, &tmp); \
         }                                                                   \
@@ -249,9 +249,9 @@ typedef enum ldlm_policy_res ldlm_policy_res_t;
                         return rc;                                          \
                 }                                                           \
                                                                             \
-                cfs_spin_lock(&pl->pl_lock);                                \
-                pl->pl_##var = tmp;                                         \
-                cfs_spin_unlock(&pl->pl_lock);                              \
+               spin_lock(&pl->pl_lock);                                    \
+               pl->pl_##var = tmp;                                         \
+               spin_unlock(&pl->pl_lock);                                  \
                                                                             \
                 return rc;                                                  \
         }                                                                   \
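
The ldlm pool hunks convert locking inside a token-pasting macro that stamps out a /proc read handler per pool field, each snapshotting pl_<var> under pl_lock. A reduced sketch of the same macro shape (the struct and generated function names are hypothetical):

    #include <linux/spinlock.h>

    struct pool {
            spinlock_t      pl_lock;
            int             pl_limit;
            int             pl_granted;
    };

    /* generate pool_read_<var>(): snapshot pl_<var> under pl_lock */
    #define POOL_READER(var)                                        \
    static int pool_read_##var(struct pool *pl)                     \
    {                                                               \
            int tmp;                                                \
                                                                    \
            spin_lock(&pl->pl_lock);                                \
            tmp = pl->pl_##var;                                     \
            spin_unlock(&pl->pl_lock);                              \
            return tmp;                                             \
    }

    POOL_READER(limit)      /* defines pool_read_limit() */
    POOL_READER(granted)    /* defines pool_read_granted() */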
index a7ada1b..cdf753c 100644 (file)
@@ -77,7 +77,7 @@ static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
                 }
         }
 
-        cfs_spin_lock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
         cfs_list_for_each_entry(item, &imp->imp_conn_list, oic_item) {
                 if (obd_uuid_equals(uuid, &item->oic_uuid)) {
                         if (priority) {
@@ -89,7 +89,7 @@ static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
                         CDEBUG(D_HA, "imp %p@%s: found existing conn %s%s\n",
                                imp, imp->imp_obd->obd_name, uuid->uuid,
                                (priority ? ", moved to head" : ""));
-                        cfs_spin_unlock(&imp->imp_lock);
+                       spin_unlock(&imp->imp_lock);
                         GOTO(out_free, rc = 0);
                 }
         }
@@ -107,11 +107,11 @@ static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
                        imp, imp->imp_obd->obd_name, uuid->uuid,
                        (priority ? "head" : "tail"));
         } else {
-                cfs_spin_unlock(&imp->imp_lock);
-                GOTO(out_free, rc = -ENOENT);
-        }
+               spin_unlock(&imp->imp_lock);
+               GOTO(out_free, rc = -ENOENT);
+       }
 
-        cfs_spin_unlock(&imp->imp_lock);
+       spin_unlock(&imp->imp_lock);
         RETURN(0);
 out_free:
         if (imp_conn)
@@ -135,12 +135,12 @@ EXPORT_SYMBOL(client_import_add_conn);
 
 int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
 {
-        struct obd_import_conn *imp_conn;
-        struct obd_export *dlmexp;
-        int rc = -ENOENT;
-        ENTRY;
+       struct obd_import_conn *imp_conn;
+       struct obd_export *dlmexp;
+       int rc = -ENOENT;
+       ENTRY;
 
-        cfs_spin_lock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
         if (cfs_list_empty(&imp->imp_conn_list)) {
                 LASSERT(!imp->imp_connection);
                 GOTO(out, rc);
@@ -182,10 +182,10 @@ int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
                 break;
         }
 out:
-        cfs_spin_unlock(&imp->imp_lock);
-        if (rc == -ENOENT)
-                CERROR("connection %s not found\n", uuid->uuid);
-        RETURN(rc);
+       spin_unlock(&imp->imp_lock);
+       if (rc == -ENOENT)
+               CERROR("connection %s not found\n", uuid->uuid);
+       RETURN(rc);
 }
 EXPORT_SYMBOL(client_import_del_conn);
 
@@ -194,13 +194,13 @@ EXPORT_SYMBOL(client_import_del_conn);
  * to find a conn uuid of @imp which can reach @peer.
  */
 int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
-                            struct obd_uuid *uuid)
+                           struct obd_uuid *uuid)
 {
-        struct obd_import_conn *conn;
-        int rc = -ENOENT;
-        ENTRY;
+       struct obd_import_conn *conn;
+       int rc = -ENOENT;
+       ENTRY;
 
-        cfs_spin_lock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
         cfs_list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
                 /* check if conn uuid does have this peer nid */
                 if (class_check_uuid(&conn->oic_uuid, peer)) {
@@ -209,8 +209,8 @@ int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
                         break;
                 }
         }
-        cfs_spin_unlock(&imp->imp_lock);
-        RETURN(rc);
+       spin_unlock(&imp->imp_lock);
+       RETURN(rc);
 }
 EXPORT_SYMBOL(client_import_find_conn);
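
client_import_find_conn() above is typical of the converted list walks: the whole traversal of imp_conn_list happens under imp_lock. A generic sketch using the plain kernel list API (names hypothetical; the patched code still uses the cfs_list_* aliases):

    #include <linux/spinlock.h>
    #include <linux/list.h>
    #include <linux/errno.h>
    #include <linux/string.h>

    struct conn {
            struct list_head        c_item;
            char                    c_uuid[40];
    };

    struct import {
            spinlock_t              i_lock;
            struct list_head        i_conns; /* of struct conn, under i_lock */
    };

    static int import_find_conn(struct import *imp, const char *uuid)
    {
            struct conn *c;
            int rc = -ENOENT;

            spin_lock(&imp->i_lock);
            list_for_each_entry(c, &imp->i_conns, c_item) {
                    if (strcmp(c->c_uuid, uuid) == 0) {
                            rc = 0;         /* found a matching conn */
                            break;
                    }
            }
            spin_unlock(&imp->i_lock);
            return rc;
    }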
 
@@ -331,8 +331,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
                 RETURN(-EINVAL);
         }
 
-        cfs_init_rwsem(&cli->cl_sem);
-        cfs_sema_init(&cli->cl_mgc_sem, 1);
+       init_rwsem(&cli->cl_sem);
+       sema_init(&cli->cl_mgc_sem, 1);
         cli->cl_conn_count = 0;
         memcpy(server_uuid.uuid, lustre_cfg_buf(lcfg, 2),
                min_t(unsigned int, LUSTRE_CFG_BUFLEN(lcfg, 2),
@@ -352,15 +352,15 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
         client_obd_list_lock_init(&cli->cl_loi_list_lock);
        cfs_atomic_set(&cli->cl_pending_w_pages, 0);
        cfs_atomic_set(&cli->cl_pending_r_pages, 0);
-        cli->cl_r_in_flight = 0;
-        cli->cl_w_in_flight = 0;
+       cli->cl_r_in_flight = 0;
+       cli->cl_w_in_flight = 0;
 
-        cfs_spin_lock_init(&cli->cl_read_rpc_hist.oh_lock);
-        cfs_spin_lock_init(&cli->cl_write_rpc_hist.oh_lock);
-        cfs_spin_lock_init(&cli->cl_read_page_hist.oh_lock);
-        cfs_spin_lock_init(&cli->cl_write_page_hist.oh_lock);
-        cfs_spin_lock_init(&cli->cl_read_offset_hist.oh_lock);
-        cfs_spin_lock_init(&cli->cl_write_offset_hist.oh_lock);
+       spin_lock_init(&cli->cl_read_rpc_hist.oh_lock);
+       spin_lock_init(&cli->cl_write_rpc_hist.oh_lock);
+       spin_lock_init(&cli->cl_read_page_hist.oh_lock);
+       spin_lock_init(&cli->cl_write_page_hist.oh_lock);
+       spin_lock_init(&cli->cl_read_offset_hist.oh_lock);
+       spin_lock_init(&cli->cl_write_offset_hist.oh_lock);
 
        /* lru for osc. */
        CFS_INIT_LIST_HEAD(&cli->cl_lru_osc);
@@ -438,9 +438,9 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
                         CDEBUG(D_HA, "marking %s %s->%s as inactive\n",
                                name, obddev->obd_name,
                                cli->cl_target_uuid.uuid);
-                        cfs_spin_lock(&imp->imp_lock);
-                        imp->imp_deactive = 1;
-                        cfs_spin_unlock(&imp->imp_lock);
+                       spin_lock(&imp->imp_lock);
+                       imp->imp_deactive = 1;
+                       spin_unlock(&imp->imp_lock);
                 }
         }
 
@@ -496,7 +496,7 @@ int client_connect_import(const struct lu_env *env,
         ENTRY;
 
         *exp = NULL;
-        cfs_down_write(&cli->cl_sem);
+       down_write(&cli->cl_sem);
         if (cli->cl_conn_count > 0 )
                 GOTO(out_sem, rc = -EALREADY);
 
@@ -545,7 +545,7 @@ out_ldlm:
                 *exp = NULL;
         }
 out_sem:
-        cfs_up_write(&cli->cl_sem);
+       up_write(&cli->cl_sem);
 
         return rc;
 }
@@ -568,7 +568,7 @@ int client_disconnect_export(struct obd_export *exp)
         cli = &obd->u.cli;
         imp = cli->cl_import;
 
-        cfs_down_write(&cli->cl_sem);
+       down_write(&cli->cl_sem);
         CDEBUG(D_INFO, "disconnect %s - %d\n", obd->obd_name,
                cli->cl_conn_count);
 
@@ -585,9 +585,9 @@ int client_disconnect_export(struct obd_export *exp)
         /* Mark import deactivated now, so we don't try to reconnect if any
          * of the cleanup RPCs fails (e.g. ldlm cancel, etc).  We don't
          * fully deactivate the import, or that would drop all requests. */
-        cfs_spin_lock(&imp->imp_lock);
-        imp->imp_deactive = 1;
-        cfs_spin_unlock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
+       imp->imp_deactive = 1;
+       spin_unlock(&imp->imp_lock);
 
         /* Some non-replayable imports (MDS's OSCs) are pinged, so just
          * delete it regardless.  (It's safe to delete an import that was
@@ -605,9 +605,9 @@ int client_disconnect_export(struct obd_export *exp)
          * there's no need to hold sem during disconnecting an import,
          * and actually it may cause deadlock in gss.
          */
-        cfs_up_write(&cli->cl_sem);
-        rc = ptlrpc_disconnect_import(imp, 0);
-        cfs_down_write(&cli->cl_sem);
+       up_write(&cli->cl_sem);
+       rc = ptlrpc_disconnect_import(imp, 0);
+       down_write(&cli->cl_sem);
 
         ptlrpc_invalidate_import(imp);
 
@@ -620,7 +620,7 @@ int client_disconnect_export(struct obd_export *exp)
         if (!rc && err)
                 rc = err;
 
-        cfs_up_write(&cli->cl_sem);
+       up_write(&cli->cl_sem);
 
         RETURN(rc);
 }
@@ -642,23 +642,23 @@ int server_disconnect_export(struct obd_export *exp)
                 ldlm_cancel_locks_for_export(exp);
 
         /* complete all outstanding replies */
-        cfs_spin_lock(&exp->exp_lock);
-        while (!cfs_list_empty(&exp->exp_outstanding_replies)) {
-                struct ptlrpc_reply_state *rs =
-                        cfs_list_entry(exp->exp_outstanding_replies.next,
-                                       struct ptlrpc_reply_state, rs_exp_list);
+       spin_lock(&exp->exp_lock);
+       while (!cfs_list_empty(&exp->exp_outstanding_replies)) {
+               struct ptlrpc_reply_state *rs =
+                       cfs_list_entry(exp->exp_outstanding_replies.next,
+                                      struct ptlrpc_reply_state, rs_exp_list);
                struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
 
-               cfs_spin_lock(&svcpt->scp_rep_lock);
+               spin_lock(&svcpt->scp_rep_lock);
 
                cfs_list_del_init(&rs->rs_exp_list);
-               cfs_spin_lock(&rs->rs_lock);
+               spin_lock(&rs->rs_lock);
                ptlrpc_schedule_difficult_reply(rs);
-               cfs_spin_unlock(&rs->rs_lock);
+               spin_unlock(&rs->rs_lock);
 
-               cfs_spin_unlock(&svcpt->scp_rep_lock);
+               spin_unlock(&svcpt->scp_rep_lock);
        }
-       cfs_spin_unlock(&exp->exp_lock);
+       spin_unlock(&exp->exp_lock);
 
        RETURN(rc);
 }
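
server_disconnect_export() drains outstanding replies under three nested spinlocks (export, service partition, reply state), acquired outer-to-inner and released inner-to-outer. A skeletal sketch of that nesting, with hypothetical structures in place of the ptlrpc ones:

    #include <linux/spinlock.h>
    #include <linux/list.h>

    struct reply {
            struct list_head        r_item;
            spinlock_t              r_lock;
    };

    struct svc_part {
            spinlock_t              sp_lock;
    };

    struct export {
            spinlock_t              e_lock;
            struct list_head        e_replies; /* of struct reply */
    };

    static void drain_replies(struct export *exp, struct svc_part *svcpt)
    {
            spin_lock(&exp->e_lock);
            while (!list_empty(&exp->e_replies)) {
                    struct reply *rs = list_first_entry(&exp->e_replies,
                                                        struct reply, r_item);

                    spin_lock(&svcpt->sp_lock);
                    list_del_init(&rs->r_item);     /* makes the loop progress */
                    spin_lock(&rs->r_lock);
                    /* ... schedule the reply for completion ... */
                    spin_unlock(&rs->r_lock);
                    spin_unlock(&svcpt->sp_lock);
            }
            spin_unlock(&exp->e_lock);
    }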
@@ -738,10 +738,10 @@ void target_client_add_cb(struct obd_device *obd, __u64 transno, void *cb_data,
         CDEBUG(D_RPCTRACE, "%s: committing for initial connect of %s\n",
                obd->obd_name, exp->exp_client_uuid.uuid);
 
-        cfs_spin_lock(&exp->exp_lock);
-        exp->exp_need_sync = 0;
-        cfs_spin_unlock(&exp->exp_lock);
-        class_export_cb_put(exp);
+       spin_lock(&exp->exp_lock);
+       exp->exp_need_sync = 0;
+       spin_unlock(&exp->exp_lock);
+       class_export_cb_put(exp);
 }
 EXPORT_SYMBOL(target_client_add_cb);
 
@@ -798,9 +798,9 @@ int target_handle_connect(struct ptlrpc_request *req)
                GOTO(out, rc = -ENODEV);
        }
 
-       cfs_spin_lock(&target->obd_dev_lock);
+       spin_lock(&target->obd_dev_lock);
        if (target->obd_stopping || !target->obd_set_up) {
-               cfs_spin_unlock(&target->obd_dev_lock);
+               spin_unlock(&target->obd_dev_lock);
 
                deuuidify(str, NULL, &target_start, &target_len);
                LCONSOLE_ERROR_MSG(0x137, "%.*s: Not available for connect "
@@ -812,7 +812,7 @@ int target_handle_connect(struct ptlrpc_request *req)
        }
 
         if (target->obd_no_conn) {
-               cfs_spin_unlock(&target->obd_dev_lock);
+               spin_unlock(&target->obd_dev_lock);
 
                 LCONSOLE_WARN("%s: Temporarily refusing client connection "
                               "from %s\n", target->obd_name,
@@ -826,7 +826,7 @@ int target_handle_connect(struct ptlrpc_request *req)
         targref = class_incref(target, __FUNCTION__, cfs_current());
 
        target->obd_conn_inprogress++;
-       cfs_spin_unlock(&target->obd_dev_lock);
+       spin_unlock(&target->obd_dev_lock);
 
         str = req_capsule_client_get(&req->rq_pill, &RMF_CLUUID);
         if (str == NULL) {
@@ -913,18 +913,18 @@ int target_handle_connect(struct ptlrpc_request *req)
 
         /* we've found an export in the hash */
 
-       cfs_spin_lock(&export->exp_lock);
-
-        if (export->exp_connecting) { /* bug 9635, et. al. */
-               cfs_spin_unlock(&export->exp_lock);
-                LCONSOLE_WARN("%s: Export %p already connecting from %s\n",
-                              export->exp_obd->obd_name, export,
-                              libcfs_nid2str(req->rq_peer.nid));
-                class_export_put(export);
-                export = NULL;
-                rc = -EALREADY;
-        } else if (mds_conn && export->exp_connection) {
-               cfs_spin_unlock(&export->exp_lock);
+       spin_lock(&export->exp_lock);
+
+       if (export->exp_connecting) { /* bug 9635, et al. */

+               spin_unlock(&export->exp_lock);
+               LCONSOLE_WARN("%s: Export %p already connecting from %s\n",
+                             export->exp_obd->obd_name, export,
+                             libcfs_nid2str(req->rq_peer.nid));
+               class_export_put(export);
+               export = NULL;
+               rc = -EALREADY;
+       } else if (mds_conn && export->exp_connection) {
+               spin_unlock(&export->exp_lock);
                 if (req->rq_peer.nid != export->exp_connection->c_peer.nid)
                         /* mds reconnected after failover */
                         LCONSOLE_WARN("%s: Received MDS connection from "
@@ -944,7 +944,7 @@ int target_handle_connect(struct ptlrpc_request *req)
                    req->rq_peer.nid != export->exp_connection->c_peer.nid &&
                    (lustre_msg_get_op_flags(req->rq_reqmsg) &
                     MSG_CONNECT_INITIAL)) {
-               cfs_spin_unlock(&export->exp_lock);
+               spin_unlock(&export->exp_lock);
                 /* in mds failover we have static uuid but nid can be
                  * changed*/
                 LCONSOLE_WARN("%s: Client %s seen on new nid %s when "
@@ -958,7 +958,7 @@ int target_handle_connect(struct ptlrpc_request *req)
                 export = NULL;
         } else {
                export->exp_connecting = 1;
-               cfs_spin_unlock(&export->exp_lock);
+               spin_unlock(&export->exp_lock);
                LASSERT(export->exp_obd == target);
 
                rc = target_handle_reconnect(&conn, export, &cluuid);
@@ -984,13 +984,13 @@ no_export:
                               target->obd_name, cluuid.uuid,
                               libcfs_nid2str(req->rq_peer.nid),
                               cfs_atomic_read(&export->exp_rpc_count) - 1);
-                cfs_spin_lock(&export->exp_lock);
-                if (req->rq_export->exp_conn_cnt <
-                    lustre_msg_get_conn_cnt(req->rq_reqmsg))
-                        /* try to abort active requests */
-                        req->rq_export->exp_abort_active_req = 1;
-                cfs_spin_unlock(&export->exp_lock);
-                GOTO(out, rc = -EBUSY);
+               spin_lock(&export->exp_lock);
+               if (req->rq_export->exp_conn_cnt <
+                   lustre_msg_get_conn_cnt(req->rq_reqmsg))
+                       /* try to abort active requests */
+                       req->rq_export->exp_abort_active_req = 1;
+               spin_unlock(&export->exp_lock);
+               GOTO(out, rc = -EBUSY);
         } else if (lustre_msg_get_conn_cnt(req->rq_reqmsg) == 1) {
                 if (!strstr(cluuid.uuid, "mdt"))
                         LCONSOLE_WARN("%s: Rejecting reconnect from the "
@@ -1118,9 +1118,9 @@ dont_check_exports:
        /* request takes one export refcount */
        req->rq_export = class_export_get(export);
 
-        cfs_spin_lock(&export->exp_lock);
-        if (export->exp_conn_cnt >= lustre_msg_get_conn_cnt(req->rq_reqmsg)) {
-                cfs_spin_unlock(&export->exp_lock);
+       spin_lock(&export->exp_lock);
+       if (export->exp_conn_cnt >= lustre_msg_get_conn_cnt(req->rq_reqmsg)) {
+               spin_unlock(&export->exp_lock);
                CDEBUG(D_RPCTRACE, "%s: %s already connected at greater "
                       "or equal conn_cnt: %d >= %d\n",
                        cluuid.uuid, libcfs_nid2str(req->rq_peer.nid),
@@ -1136,14 +1136,14 @@ dont_check_exports:
         /* request from liblustre?  Don't evict it for not pinging. */
         if (lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_LIBCLIENT) {
                 export->exp_libclient = 1;
-                cfs_spin_unlock(&export->exp_lock);
+               spin_unlock(&export->exp_lock);
 
-                cfs_spin_lock(&target->obd_dev_lock);
-                cfs_list_del_init(&export->exp_obd_chain_timed);
-                cfs_spin_unlock(&target->obd_dev_lock);
-        } else {
-                cfs_spin_unlock(&export->exp_lock);
-        }
+               spin_lock(&target->obd_dev_lock);
+               cfs_list_del_init(&export->exp_obd_chain_timed);
+               spin_unlock(&target->obd_dev_lock);
+       } else {
+               spin_unlock(&export->exp_lock);
+       }
 
         if (export->exp_connection != NULL) {
                 /* Check to see if connection came from another NID */
@@ -1169,17 +1169,17 @@ dont_check_exports:
                 int has_transno;
                 __u64 transno = data->ocd_transno;
 
-                cfs_spin_lock(&export->exp_lock);
+               spin_lock(&export->exp_lock);
                /* possible race with class_disconnect_stale_exports,
                 * export may be already in the eviction process */
                if (export->exp_failed) {
-                       cfs_spin_unlock(&export->exp_lock);
+                       spin_unlock(&export->exp_lock);
                        GOTO(out, rc = -ENODEV);
                }
-                export->exp_in_recovery = 1;
-                export->exp_req_replay_needed = 1;
-                export->exp_lock_replay_needed = 1;
-                cfs_spin_unlock(&export->exp_lock);
+               export->exp_in_recovery = 1;
+               export->exp_req_replay_needed = 1;
+               export->exp_lock_replay_needed = 1;
+               spin_unlock(&export->exp_lock);
 
                 has_transno = !!(lustre_msg_get_op_flags(req->rq_reqmsg) &
                                  MSG_CONNECT_TRANSNO);
@@ -1191,10 +1191,10 @@ dont_check_exports:
                     transno > target->obd_last_committed) {
                         /* another way is to use cmpxchg() so it will be
                          * lock free */
-                        cfs_spin_lock(&target->obd_recovery_task_lock);
-                        if (transno < target->obd_next_recovery_transno)
-                                target->obd_next_recovery_transno = transno;
-                        cfs_spin_unlock(&target->obd_recovery_task_lock);
+                       spin_lock(&target->obd_recovery_task_lock);
+                       if (transno < target->obd_next_recovery_transno)
+                               target->obd_next_recovery_transno = transno;
+                       spin_unlock(&target->obd_recovery_task_lock);
                 }
 
                 cfs_atomic_inc(&target->obd_req_replay_clients);
@@ -1220,12 +1220,12 @@ dont_check_exports:
                GOTO(out, rc = -ENOTCONN);
        }
 
-       cfs_spin_lock(&export->exp_lock);
+       spin_lock(&export->exp_lock);
        if (export->exp_imp_reverse != NULL)
                /* destroyed import can be still referenced in ctxt */
                tmp_imp = export->exp_imp_reverse;
        export->exp_imp_reverse = revimp;
-       cfs_spin_unlock(&export->exp_lock);
+       spin_unlock(&export->exp_lock);
 
         revimp->imp_connection = ptlrpc_connection_addref(export->exp_connection);
         revimp->imp_client = &export->exp_obd->obd_ldlm_client;
@@ -1252,9 +1252,9 @@ dont_check_exports:
        rc = sptlrpc_import_sec_adapt(revimp, req->rq_svc_ctx, &req->rq_flvr);
        if (rc) {
                CERROR("Failed to get sec for reverse import: %d\n", rc);
-               cfs_spin_lock(&export->exp_lock);
+               spin_lock(&export->exp_lock);
                export->exp_imp_reverse = NULL;
-               cfs_spin_unlock(&export->exp_lock);
+               spin_unlock(&export->exp_lock);
                class_destroy_import(revimp);
        }
 
@@ -1263,23 +1263,23 @@ dont_check_exports:
 out:
        if (tmp_imp != NULL)
                client_destroy_import(tmp_imp);
-        if (export) {
-                cfs_spin_lock(&export->exp_lock);
-                export->exp_connecting = 0;
-                cfs_spin_unlock(&export->exp_lock);
+       if (export) {
+               spin_lock(&export->exp_lock);
+               export->exp_connecting = 0;
+               spin_unlock(&export->exp_lock);
 
-                class_export_put(export);
-        }
-        if (targref) {
-               cfs_spin_lock(&target->obd_dev_lock);
+               class_export_put(export);
+       }
+       if (targref) {
+               spin_lock(&target->obd_dev_lock);
                target->obd_conn_inprogress--;
-               cfs_spin_unlock(&target->obd_dev_lock);
+               spin_unlock(&target->obd_dev_lock);
 
-                class_decref(targref, __FUNCTION__, cfs_current());
+               class_decref(targref, __func__, cfs_current());
        }
-        if (rc)
-                req->rq_status = rc;
-        RETURN(rc);
+       if (rc)
+               req->rq_status = rc;
+       RETURN(rc);
 }
 EXPORT_SYMBOL(target_handle_connect);
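
target_handle_connect() brackets its slow connection path with a counter kept under obd_dev_lock: state is checked and the counter incremented before the lock is dropped, then decremented on the way out so teardown can wait for in-flight connects. A sketch of that bracket (hypothetical device type):

    #include <linux/spinlock.h>
    #include <linux/errno.h>

    struct dev {
            spinlock_t      d_lock;
            int             d_stopping;
            int             d_conn_inprogress;      /* under d_lock */
    };

    static int dev_connect(struct dev *d)
    {
            int rc;

            spin_lock(&d->d_lock);
            if (d->d_stopping) {
                    spin_unlock(&d->d_lock);
                    return -ENODEV;
            }
            d->d_conn_inprogress++;         /* keep teardown waiting for us */
            spin_unlock(&d->d_lock);

            rc = 0;                         /* ... slow connect work, unlocked ... */

            spin_lock(&d->d_lock);
            d->d_conn_inprogress--;
            spin_unlock(&d->d_lock);
            return rc;
    }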
 
@@ -1304,12 +1304,12 @@ void target_destroy_export(struct obd_export *exp)
        struct obd_import       *imp = NULL;
        /* exports created from last_rcvd data, and "fake"
           exports created by lctl don't have an import */
-       cfs_spin_lock(&exp->exp_lock);
+       spin_lock(&exp->exp_lock);
        if (exp->exp_imp_reverse != NULL) {
                imp = exp->exp_imp_reverse;
                exp->exp_imp_reverse = NULL;
        }
-       cfs_spin_unlock(&exp->exp_lock);
+       spin_unlock(&exp->exp_lock);
        if (imp != NULL)
                client_destroy_import(imp);
 
@@ -1354,7 +1354,7 @@ static int target_exp_enqueue_req_replay(struct ptlrpc_request *req)
 
         LASSERT(exp);
 
-        cfs_spin_lock(&exp->exp_lock);
+       spin_lock(&exp->exp_lock);
         cfs_list_for_each_entry(reqiter, &exp->exp_req_replay_queue,
                                 rq_replay_list) {
                 if (lustre_msg_get_transno(reqiter->rq_reqmsg) == transno) {
@@ -1374,18 +1374,18 @@ static int target_exp_enqueue_req_replay(struct ptlrpc_request *req)
                                   &exp->exp_req_replay_queue);
         }
 
-        cfs_spin_unlock(&exp->exp_lock);
+       spin_unlock(&exp->exp_lock);
         return dup;
 }
 
 static void target_exp_dequeue_req_replay(struct ptlrpc_request *req)
 {
-        LASSERT(!cfs_list_empty(&req->rq_replay_list));
-        LASSERT(req->rq_export);
+       LASSERT(!cfs_list_empty(&req->rq_replay_list));
+       LASSERT(req->rq_export);
 
-        cfs_spin_lock(&req->rq_export->exp_lock);
-        cfs_list_del_init(&req->rq_replay_list);
-        cfs_spin_unlock(&req->rq_export->exp_lock);
+       spin_lock(&req->rq_export->exp_lock);
+       cfs_list_del_init(&req->rq_replay_list);
+       spin_unlock(&req->rq_export->exp_lock);
 }
 
 #ifdef __KERNEL__
@@ -1407,7 +1407,7 @@ static void target_finish_recovery(struct obd_device *obd)
        }
 
         ldlm_reprocess_all_ns(obd->obd_namespace);
-        cfs_spin_lock(&obd->obd_recovery_task_lock);
+       spin_lock(&obd->obd_recovery_task_lock);
         if (!cfs_list_empty(&obd->obd_req_replay_queue) ||
             !cfs_list_empty(&obd->obd_lock_replay_queue) ||
             !cfs_list_empty(&obd->obd_final_req_queue)) {
@@ -1418,10 +1418,10 @@ static void target_finish_recovery(struct obd_device *obd)
                                "" : "lock ",
                        cfs_list_empty(&obd->obd_final_req_queue) ? \
                                "" : "final ");
-                cfs_spin_unlock(&obd->obd_recovery_task_lock);
-                LBUG();
-        }
-        cfs_spin_unlock(&obd->obd_recovery_task_lock);
+               spin_unlock(&obd->obd_recovery_task_lock);
+               LBUG();
+       }
+       spin_unlock(&obd->obd_recovery_task_lock);
 
         obd->obd_recovery_end = cfs_time_current_sec();
 
@@ -1437,13 +1437,13 @@ static void target_finish_recovery(struct obd_device *obd)
 
 static void abort_req_replay_queue(struct obd_device *obd)
 {
-        struct ptlrpc_request *req, *n;
-        cfs_list_t abort_list;
+       struct ptlrpc_request *req, *n;
+       cfs_list_t abort_list;
 
-        CFS_INIT_LIST_HEAD(&abort_list);
-        cfs_spin_lock(&obd->obd_recovery_task_lock);
-        cfs_list_splice_init(&obd->obd_req_replay_queue, &abort_list);
-        cfs_spin_unlock(&obd->obd_recovery_task_lock);
+       CFS_INIT_LIST_HEAD(&abort_list);
+       spin_lock(&obd->obd_recovery_task_lock);
+       cfs_list_splice_init(&obd->obd_req_replay_queue, &abort_list);
+       spin_unlock(&obd->obd_recovery_task_lock);
         cfs_list_for_each_entry_safe(req, n, &abort_list, rq_list) {
                 DEBUG_REQ(D_WARNING, req, "aborted:");
                 req->rq_status = -ENOTCONN;
@@ -1458,13 +1458,13 @@ static void abort_req_replay_queue(struct obd_device *obd)
 
 static void abort_lock_replay_queue(struct obd_device *obd)
 {
-        struct ptlrpc_request *req, *n;
-        cfs_list_t abort_list;
+       struct ptlrpc_request *req, *n;
+       cfs_list_t abort_list;
 
-        CFS_INIT_LIST_HEAD(&abort_list);
-        cfs_spin_lock(&obd->obd_recovery_task_lock);
-        cfs_list_splice_init(&obd->obd_lock_replay_queue, &abort_list);
-        cfs_spin_unlock(&obd->obd_recovery_task_lock);
+       CFS_INIT_LIST_HEAD(&abort_list);
+       spin_lock(&obd->obd_recovery_task_lock);
+       cfs_list_splice_init(&obd->obd_lock_replay_queue, &abort_list);
+       spin_unlock(&obd->obd_recovery_task_lock);
         cfs_list_for_each_entry_safe(req, n, &abort_list, rq_list){
                 DEBUG_REQ(D_ERROR, req, "aborted:");
                 req->rq_status = -ENOTCONN;
@@ -1492,30 +1492,30 @@ void target_cleanup_recovery(struct obd_device *obd)
         ENTRY;
 
         CFS_INIT_LIST_HEAD(&clean_list);
-        cfs_spin_lock(&obd->obd_dev_lock);
-        if (!obd->obd_recovering) {
-                cfs_spin_unlock(&obd->obd_dev_lock);
-                EXIT;
-                return;
-        }
-        obd->obd_recovering = obd->obd_abort_recovery = 0;
-        cfs_spin_unlock(&obd->obd_dev_lock);
-
-        cfs_spin_lock(&obd->obd_recovery_task_lock);
-        target_cancel_recovery_timer(obd);
-        cfs_list_splice_init(&obd->obd_req_replay_queue, &clean_list);
-        cfs_spin_unlock(&obd->obd_recovery_task_lock);
-
-        cfs_list_for_each_entry_safe(req, n, &clean_list, rq_list) {
-                LASSERT(req->rq_reply_state == 0);
-                target_exp_dequeue_req_replay(req);
-                target_request_copy_put(req);
-        }
+       spin_lock(&obd->obd_dev_lock);
+       if (!obd->obd_recovering) {
+               spin_unlock(&obd->obd_dev_lock);
+               EXIT;
+               return;
+       }
+       obd->obd_recovering = obd->obd_abort_recovery = 0;
+       spin_unlock(&obd->obd_dev_lock);
+
+       spin_lock(&obd->obd_recovery_task_lock);
+       target_cancel_recovery_timer(obd);
+       cfs_list_splice_init(&obd->obd_req_replay_queue, &clean_list);
+       spin_unlock(&obd->obd_recovery_task_lock);
+
+       cfs_list_for_each_entry_safe(req, n, &clean_list, rq_list) {
+               LASSERT(req->rq_reply_state == 0);
+               target_exp_dequeue_req_replay(req);
+               target_request_copy_put(req);
+       }
 
-        cfs_spin_lock(&obd->obd_recovery_task_lock);
-        cfs_list_splice_init(&obd->obd_lock_replay_queue, &clean_list);
-        cfs_list_splice_init(&obd->obd_final_req_queue, &clean_list);
-        cfs_spin_unlock(&obd->obd_recovery_task_lock);
+       spin_lock(&obd->obd_recovery_task_lock);
+       cfs_list_splice_init(&obd->obd_lock_replay_queue, &clean_list);
+       cfs_list_splice_init(&obd->obd_final_req_queue, &clean_list);
+       spin_unlock(&obd->obd_recovery_task_lock);
 
         cfs_list_for_each_entry_safe(req, n, &clean_list, rq_list){
                 LASSERT(req->rq_reply_state == 0);
@@ -1536,26 +1536,26 @@ EXPORT_SYMBOL(target_cancel_recovery_timer);
 
 static void target_start_recovery_timer(struct obd_device *obd)
 {
-        if (obd->obd_recovery_start != 0)
-                return;
+       if (obd->obd_recovery_start != 0)
+               return;
 
-        cfs_spin_lock(&obd->obd_dev_lock);
-        if (!obd->obd_recovering || obd->obd_abort_recovery) {
-                cfs_spin_unlock(&obd->obd_dev_lock);
-                return;
-        }
+       spin_lock(&obd->obd_dev_lock);
+       if (!obd->obd_recovering || obd->obd_abort_recovery) {
+               spin_unlock(&obd->obd_dev_lock);
+               return;
+       }
 
-        LASSERT(obd->obd_recovery_timeout != 0);
+       LASSERT(obd->obd_recovery_timeout != 0);
 
-        if (obd->obd_recovery_start != 0) {
-                cfs_spin_unlock(&obd->obd_dev_lock);
-                return;
-        }
+       if (obd->obd_recovery_start != 0) {
+               spin_unlock(&obd->obd_dev_lock);
+               return;
+       }
 
-        cfs_timer_arm(&obd->obd_recovery_timer,
-                      cfs_time_shift(obd->obd_recovery_timeout));
-        obd->obd_recovery_start = cfs_time_current_sec();
-        cfs_spin_unlock(&obd->obd_dev_lock);
+       cfs_timer_arm(&obd->obd_recovery_timer,
+                     cfs_time_shift(obd->obd_recovery_timeout));
+       obd->obd_recovery_start = cfs_time_current_sec();
+       spin_unlock(&obd->obd_dev_lock);
 
         LCONSOLE_WARN("%s: Will be in recovery for at least %d:%.02d, "
                       "or until %d client%s reconnect%s\n",
@@ -1575,14 +1575,14 @@ static void target_start_recovery_timer(struct obd_device *obd)
  */
 static void extend_recovery_timer(struct obd_device *obd, int drt, bool extend)
 {
-        cfs_time_t now;
-        cfs_time_t end;
-        cfs_duration_t left;
-        int to;
-
-        cfs_spin_lock(&obd->obd_dev_lock);
-        if (!obd->obd_recovering || obd->obd_abort_recovery) {
-                cfs_spin_unlock(&obd->obd_dev_lock);
+       cfs_time_t now;
+       cfs_time_t end;
+       cfs_duration_t left;
+       int to;
+
+       spin_lock(&obd->obd_dev_lock);
+       if (!obd->obd_recovering || obd->obd_abort_recovery) {
+               spin_unlock(&obd->obd_dev_lock);
                 return;
         }
         LASSERT(obd->obd_recovery_start != 0);
@@ -1607,10 +1607,10 @@ static void extend_recovery_timer(struct obd_device *obd, int drt, bool extend)
                 cfs_timer_arm(&obd->obd_recovery_timer,
                               cfs_time_shift(drt));
         }
-        cfs_spin_unlock(&obd->obd_dev_lock);
+       spin_unlock(&obd->obd_dev_lock);
 
-        CDEBUG(D_HA, "%s: recovery timer will expire in %u seconds\n",
-               obd->obd_name, (unsigned)drt);
+       CDEBUG(D_HA, "%s: recovery timer will expire in %u seconds\n",
+              obd->obd_name, (unsigned)drt);
 }
 
 /* Reset the timer with each new client connection */
@@ -1704,12 +1704,12 @@ static int check_for_clients(struct obd_device *obd)
 
 static int check_for_next_transno(struct obd_device *obd)
 {
-        struct ptlrpc_request *req = NULL;
-        int wake_up = 0, connected, completed, queue_len;
-        __u64 next_transno, req_transno;
-        ENTRY;
+       struct ptlrpc_request *req = NULL;
+       int wake_up = 0, connected, completed, queue_len;
+       __u64 next_transno, req_transno;
+       ENTRY;
 
-        cfs_spin_lock(&obd->obd_recovery_task_lock);
+       spin_lock(&obd->obd_recovery_task_lock);
         if (!cfs_list_empty(&obd->obd_req_replay_queue)) {
                 req = cfs_list_entry(obd->obd_req_replay_queue.next,
                                      struct ptlrpc_request, rq_list);
@@ -1764,15 +1764,15 @@ static int check_for_next_transno(struct obd_device *obd)
                 obd->obd_next_recovery_transno = req_transno;
                 wake_up = 1;
         }
-        cfs_spin_unlock(&obd->obd_recovery_task_lock);
-        return wake_up;
+       spin_unlock(&obd->obd_recovery_task_lock);
+       return wake_up;
 }
 
 static int check_for_next_lock(struct obd_device *obd)
 {
-        int wake_up = 0;
+       int wake_up = 0;
 
-        cfs_spin_lock(&obd->obd_recovery_task_lock);
+       spin_lock(&obd->obd_recovery_task_lock);
         if (!cfs_list_empty(&obd->obd_lock_replay_queue)) {
                 CDEBUG(D_HA, "waking for next lock\n");
                 wake_up = 1;
@@ -1786,9 +1786,9 @@ static int check_for_next_lock(struct obd_device *obd)
                 CDEBUG(D_HA, "waking for expired recovery\n");
                 wake_up = 1;
         }
-        cfs_spin_unlock(&obd->obd_recovery_task_lock);
+       spin_unlock(&obd->obd_recovery_task_lock);
 
-        return wake_up;
+       return wake_up;
 }
 
 /**
@@ -1815,9 +1815,9 @@ repeat:
                 /** evict cexports with no replay in queue, they are stalled */
                 class_disconnect_stale_exports(obd, health_check);
                 /** continue with VBR */
-                cfs_spin_lock(&obd->obd_dev_lock);
-                obd->obd_version_recov = 1;
-                cfs_spin_unlock(&obd->obd_dev_lock);
+               spin_lock(&obd->obd_dev_lock);
+               obd->obd_version_recov = 1;
+               spin_unlock(&obd->obd_dev_lock);
                 /**
                  * reset timer, recovery will proceed with versions now,
                  * timeout is set just to handle reconnection delays
@@ -1843,21 +1843,21 @@ static struct ptlrpc_request *target_next_replay_req(struct obd_device *obd)
                 abort_lock_replay_queue(obd);
         }
 
-        cfs_spin_lock(&obd->obd_recovery_task_lock);
-        if (!cfs_list_empty(&obd->obd_req_replay_queue)) {
-                req = cfs_list_entry(obd->obd_req_replay_queue.next,
-                                     struct ptlrpc_request, rq_list);
-                cfs_list_del_init(&req->rq_list);
-                obd->obd_requests_queued_for_recovery--;
-                cfs_spin_unlock(&obd->obd_recovery_task_lock);
-        } else {
-                cfs_spin_unlock(&obd->obd_recovery_task_lock);
-                LASSERT(cfs_list_empty(&obd->obd_req_replay_queue));
-                LASSERT(cfs_atomic_read(&obd->obd_req_replay_clients) == 0);
-                /** evict exports failed VBR */
-                class_disconnect_stale_exports(obd, exp_vbr_healthy);
-        }
-        RETURN(req);
+       spin_lock(&obd->obd_recovery_task_lock);
+       if (!cfs_list_empty(&obd->obd_req_replay_queue)) {
+               req = cfs_list_entry(obd->obd_req_replay_queue.next,
+                                    struct ptlrpc_request, rq_list);
+               cfs_list_del_init(&req->rq_list);
+               obd->obd_requests_queued_for_recovery--;
+               spin_unlock(&obd->obd_recovery_task_lock);
+       } else {
+               spin_unlock(&obd->obd_recovery_task_lock);
+               LASSERT(cfs_list_empty(&obd->obd_req_replay_queue));
+               LASSERT(cfs_atomic_read(&obd->obd_req_replay_clients) == 0);
+               /** evict exports failed VBR */
+               class_disconnect_stale_exports(obd, exp_vbr_healthy);
+       }
+       RETURN(req);
 }
 
 static struct ptlrpc_request *target_next_replay_lock(struct obd_device *obd)
@@ -1869,14 +1869,14 @@ static struct ptlrpc_request *target_next_replay_lock(struct obd_device *obd)
                                      exp_lock_replay_healthy))
                 abort_lock_replay_queue(obd);
 
-        cfs_spin_lock(&obd->obd_recovery_task_lock);
-        if (!cfs_list_empty(&obd->obd_lock_replay_queue)) {
-                req = cfs_list_entry(obd->obd_lock_replay_queue.next,
-                                     struct ptlrpc_request, rq_list);
-                cfs_list_del_init(&req->rq_list);
-                cfs_spin_unlock(&obd->obd_recovery_task_lock);
-        } else {
-                cfs_spin_unlock(&obd->obd_recovery_task_lock);
+       spin_lock(&obd->obd_recovery_task_lock);
+       if (!cfs_list_empty(&obd->obd_lock_replay_queue)) {
+               req = cfs_list_entry(obd->obd_lock_replay_queue.next,
+                                    struct ptlrpc_request, rq_list);
+               cfs_list_del_init(&req->rq_list);
+               spin_unlock(&obd->obd_recovery_task_lock);
+       } else {
+               spin_unlock(&obd->obd_recovery_task_lock);
                 LASSERT(cfs_list_empty(&obd->obd_lock_replay_queue));
                 LASSERT(cfs_atomic_read(&obd->obd_lock_replay_clients) == 0);
                 /** evict exports failed VBR */
@@ -1887,23 +1887,23 @@ static struct ptlrpc_request *target_next_replay_lock(struct obd_device *obd)
 
 static struct ptlrpc_request *target_next_final_ping(struct obd_device *obd)
 {
-        struct ptlrpc_request *req = NULL;
-
-        cfs_spin_lock(&obd->obd_recovery_task_lock);
-        if (!cfs_list_empty(&obd->obd_final_req_queue)) {
-                req = cfs_list_entry(obd->obd_final_req_queue.next,
-                                     struct ptlrpc_request, rq_list);
-                cfs_list_del_init(&req->rq_list);
-                cfs_spin_unlock(&obd->obd_recovery_task_lock);
-                if (req->rq_export->exp_in_recovery) {
-                        cfs_spin_lock(&req->rq_export->exp_lock);
-                        req->rq_export->exp_in_recovery = 0;
-                        cfs_spin_unlock(&req->rq_export->exp_lock);
-                }
-        } else {
-                cfs_spin_unlock(&obd->obd_recovery_task_lock);
-        }
-        return req;
+       struct ptlrpc_request *req = NULL;
+
+       spin_lock(&obd->obd_recovery_task_lock);
+       if (!cfs_list_empty(&obd->obd_final_req_queue)) {
+               req = cfs_list_entry(obd->obd_final_req_queue.next,
+                                    struct ptlrpc_request, rq_list);
+               cfs_list_del_init(&req->rq_list);
+               spin_unlock(&obd->obd_recovery_task_lock);
+               if (req->rq_export->exp_in_recovery) {
+                       spin_lock(&req->rq_export->exp_lock);
+                       req->rq_export->exp_in_recovery = 0;
+                       spin_unlock(&req->rq_export->exp_lock);
+               }
+       } else {
+               spin_unlock(&obd->obd_recovery_task_lock);
+       }
+       return req;
 }
 
 static int handle_recovery_req(struct ptlrpc_thread *thread,
@@ -2017,10 +2017,10 @@ static int target_recovery_thread(void *arg)
                cfs_curproc_pid());
         trd->trd_processing_task = cfs_curproc_pid();
 
-        cfs_spin_lock(&obd->obd_dev_lock);
-        obd->obd_recovering = 1;
-        cfs_spin_unlock(&obd->obd_dev_lock);
-        cfs_complete(&trd->trd_starting);
+       spin_lock(&obd->obd_dev_lock);
+       obd->obd_recovering = 1;
+       spin_unlock(&obd->obd_dev_lock);
+       complete(&trd->trd_starting);
 
         /* first of all, we have to know the first transno to replay */
         if (target_recovery_overseer(obd, check_for_clients,
@@ -2045,9 +2045,9 @@ static int target_recovery_thread(void *arg)
                  * bz18031: increase next_recovery_transno before
                  * target_request_copy_put() will drop exp_rpc reference
                  */
-                cfs_spin_lock(&obd->obd_recovery_task_lock);
-                obd->obd_next_recovery_transno++;
-                cfs_spin_unlock(&obd->obd_recovery_task_lock);
+               spin_lock(&obd->obd_recovery_task_lock);
+               obd->obd_next_recovery_transno++;
+               spin_unlock(&obd->obd_recovery_task_lock);
                 target_exp_dequeue_req_replay(req);
                 target_request_copy_put(req);
                 obd->obd_replayed_requests++;
@@ -2077,12 +2077,12 @@ static int target_recovery_thread(void *arg)
         tgt_boot_epoch_update(lut);
         /* We drop recoverying flag to forward all new requests
          * to regular mds_handle() since now */
-        cfs_spin_lock(&obd->obd_dev_lock);
-        obd->obd_recovering = obd->obd_abort_recovery = 0;
-        cfs_spin_unlock(&obd->obd_dev_lock);
-        cfs_spin_lock(&obd->obd_recovery_task_lock);
-        target_cancel_recovery_timer(obd);
-        cfs_spin_unlock(&obd->obd_recovery_task_lock);
+       spin_lock(&obd->obd_dev_lock);
+       obd->obd_recovering = obd->obd_abort_recovery = 0;
+       spin_unlock(&obd->obd_dev_lock);
+       spin_lock(&obd->obd_recovery_task_lock);
+       target_cancel_recovery_timer(obd);
+       spin_unlock(&obd->obd_recovery_task_lock);
         while ((req = target_next_final_ping(obd))) {
                 LASSERT(trd->trd_processing_task == cfs_curproc_pid());
                 DEBUG_REQ(D_HA, req, "processing final ping from %s: ",
@@ -2104,7 +2104,7 @@ static int target_recovery_thread(void *arg)
 
         lu_context_fini(&env->le_ctx);
         trd->trd_processing_task = 0;
-        cfs_complete(&trd->trd_finishing);
+       complete(&trd->trd_finishing);
 
         OBD_FREE_PTR(thread);
         OBD_FREE_PTR(env);
@@ -2119,12 +2119,12 @@ static int target_start_recovery_thread(struct lu_target *lut,
         struct target_recovery_data *trd = &obd->obd_recovery_data;
 
         memset(trd, 0, sizeof(*trd));
-        cfs_init_completion(&trd->trd_starting);
-        cfs_init_completion(&trd->trd_finishing);
+       init_completion(&trd->trd_starting);
+       init_completion(&trd->trd_finishing);
         trd->trd_recovery_handler = handler;
 
         if (cfs_create_thread(target_recovery_thread, lut, 0) > 0) {
-                cfs_wait_for_completion(&trd->trd_starting);
+               wait_for_completion(&trd->trd_starting);
                 LASSERT(obd->obd_recovering != 0);
         } else
                 rc = -ECHILD;
@@ -2134,18 +2134,18 @@ static int target_start_recovery_thread(struct lu_target *lut,
 
 void target_stop_recovery_thread(struct obd_device *obd)
 {
-        if (obd->obd_recovery_data.trd_processing_task > 0) {
-                struct target_recovery_data *trd = &obd->obd_recovery_data;
-                /** recovery can be done but postrecovery is not yet */
-                cfs_spin_lock(&obd->obd_dev_lock);
-                if (obd->obd_recovering) {
-                        CERROR("%s: Aborting recovery\n", obd->obd_name);
-                        obd->obd_abort_recovery = 1;
-                        cfs_waitq_signal(&obd->obd_next_transno_waitq);
-                }
-                cfs_spin_unlock(&obd->obd_dev_lock);
-                cfs_wait_for_completion(&trd->trd_finishing);
-        }
+       if (obd->obd_recovery_data.trd_processing_task > 0) {
+               struct target_recovery_data *trd = &obd->obd_recovery_data;
+               /** recovery can be done but postrecovery is not yet */
+               spin_lock(&obd->obd_dev_lock);
+               if (obd->obd_recovering) {
+                       CERROR("%s: Aborting recovery\n", obd->obd_name);
+                       obd->obd_abort_recovery = 1;
+                       cfs_waitq_signal(&obd->obd_next_transno_waitq);
+               }
+               spin_unlock(&obd->obd_dev_lock);
+               wait_for_completion(&trd->trd_finishing);
+       }
 }
 EXPORT_SYMBOL(target_stop_recovery_thread);
 
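target_stop_recovery_thread() keeps the usual shutdown discipline: raise the abort flag and wake the worker while holding the spinlock, but call wait_for_completion() only after dropping it, since waiting sleeps and sleeping under a spinlock is illegal. The shape, sketched with hypothetical names (wake_up() shown where the tree still uses cfs_waitq_signal()):

#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

struct worker_state {
        spinlock_t              ws_lock;
        bool                    ws_running;
        bool                    ws_abort;
        wait_queue_head_t       ws_waitq;
        struct completion       ws_finished;
};

static void worker_stop(struct worker_state *ws)
{
        spin_lock(&ws->ws_lock);
        if (ws->ws_running) {
                ws->ws_abort = true;
                wake_up(&ws->ws_waitq);
        }
        spin_unlock(&ws->ws_lock);

        /* sleeps until the worker calls complete(&ws->ws_finished);
         * must run with no spinlock held */
        wait_for_completion(&ws->ws_finished);
}
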
@@ -2197,36 +2197,36 @@ EXPORT_SYMBOL(target_recovery_init);
 static int target_process_req_flags(struct obd_device *obd,
                                     struct ptlrpc_request *req)
 {
-        struct obd_export *exp = req->rq_export;
-        LASSERT(exp != NULL);
-        if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
-                /* client declares he's ready to replay locks */
-                cfs_spin_lock(&exp->exp_lock);
-                if (exp->exp_req_replay_needed) {
-                        exp->exp_req_replay_needed = 0;
-                        cfs_spin_unlock(&exp->exp_lock);
-
-                        LASSERT_ATOMIC_POS(&obd->obd_req_replay_clients);
-                        cfs_atomic_dec(&obd->obd_req_replay_clients);
-                } else {
-                        cfs_spin_unlock(&exp->exp_lock);
-                }
-        }
-        if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LOCK_REPLAY_DONE) {
-                /* client declares he's ready to complete recovery
-                 * so, we put the request on th final queue */
-                cfs_spin_lock(&exp->exp_lock);
-                if (exp->exp_lock_replay_needed) {
-                        exp->exp_lock_replay_needed = 0;
-                        cfs_spin_unlock(&exp->exp_lock);
-
-                        LASSERT_ATOMIC_POS(&obd->obd_lock_replay_clients);
-                        cfs_atomic_dec(&obd->obd_lock_replay_clients);
-                } else {
-                        cfs_spin_unlock(&exp->exp_lock);
-                }
-        }
-        return 0;
+       struct obd_export *exp = req->rq_export;
+       LASSERT(exp != NULL);
+       if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
+               /* client declares he's ready to replay locks */
+               spin_lock(&exp->exp_lock);
+               if (exp->exp_req_replay_needed) {
+                       exp->exp_req_replay_needed = 0;
+                       spin_unlock(&exp->exp_lock);
+
+                       LASSERT_ATOMIC_POS(&obd->obd_req_replay_clients);
+                       cfs_atomic_dec(&obd->obd_req_replay_clients);
+               } else {
+                       spin_unlock(&exp->exp_lock);
+               }
+       }
+       if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LOCK_REPLAY_DONE) {
+               /* client declares he's ready to complete recovery
+                * so, we put the request on the final queue */
+               spin_lock(&exp->exp_lock);
+               if (exp->exp_lock_replay_needed) {
+                       exp->exp_lock_replay_needed = 0;
+                       spin_unlock(&exp->exp_lock);
+
+                       LASSERT_ATOMIC_POS(&obd->obd_lock_replay_clients);
+                       cfs_atomic_dec(&obd->obd_lock_replay_clients);
+               } else {
+                       spin_unlock(&exp->exp_lock);
+               }
+       }
+       return 0;
 }
 
 int target_queue_recovery_request(struct ptlrpc_request *req,
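
target_process_req_flags() above clears each per-export flag under exp_lock but decrements the shared client counter only after unlocking, so the counter is touched exactly once per export and never under the export lock. A sketch of that shape (hypothetical names; plain kernel atomics shown where the tree still uses cfs_atomic_dec()):

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct export_like {
        spinlock_t      el_lock;
        bool            el_replay_needed;
};

static void replay_done(struct export_like *exp, atomic_t *nclients)
{
        bool was_needed;

        spin_lock(&exp->el_lock);
        was_needed = exp->el_replay_needed;
        exp->el_replay_needed = false;
        spin_unlock(&exp->el_lock);

        /* only the thread that observed the flag set may decrement,
         * and it does so with the lock dropped */
        if (was_needed)
                atomic_dec(nclients);
}
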
@@ -2250,36 +2250,36 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
                 target_request_copy_get(req);
                 DEBUG_REQ(D_HA, req, "queue final req");
                 cfs_waitq_signal(&obd->obd_next_transno_waitq);
-                cfs_spin_lock(&obd->obd_recovery_task_lock);
-                if (obd->obd_recovering) {
-                        cfs_list_add_tail(&req->rq_list,
-                                          &obd->obd_final_req_queue);
-                } else {
-                        cfs_spin_unlock(&obd->obd_recovery_task_lock);
-                        target_request_copy_put(req);
-                        RETURN(obd->obd_stopping ? -ENOTCONN : 1);
-                }
-                cfs_spin_unlock(&obd->obd_recovery_task_lock);
-                RETURN(0);
-        }
-        if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
-                /* client declares he's ready to replay locks */
-                target_request_copy_get(req);
-                DEBUG_REQ(D_HA, req, "queue lock replay req");
-                cfs_waitq_signal(&obd->obd_next_transno_waitq);
-                cfs_spin_lock(&obd->obd_recovery_task_lock);
-                LASSERT(obd->obd_recovering);
-                /* usually due to recovery abort */
-                if (!req->rq_export->exp_in_recovery) {
-                        cfs_spin_unlock(&obd->obd_recovery_task_lock);
-                        target_request_copy_put(req);
-                        RETURN(-ENOTCONN);
-                }
-                LASSERT(req->rq_export->exp_lock_replay_needed);
-                cfs_list_add_tail(&req->rq_list, &obd->obd_lock_replay_queue);
-                cfs_spin_unlock(&obd->obd_recovery_task_lock);
-                RETURN(0);
-        }
+               spin_lock(&obd->obd_recovery_task_lock);
+               if (obd->obd_recovering) {
+                       cfs_list_add_tail(&req->rq_list,
+                                         &obd->obd_final_req_queue);
+               } else {
+                       spin_unlock(&obd->obd_recovery_task_lock);
+                       target_request_copy_put(req);
+                       RETURN(obd->obd_stopping ? -ENOTCONN : 1);
+               }
+               spin_unlock(&obd->obd_recovery_task_lock);
+               RETURN(0);
+       }
+       if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REQ_REPLAY_DONE) {
+               /* client declares he's ready to replay locks */
+               target_request_copy_get(req);
+               DEBUG_REQ(D_HA, req, "queue lock replay req");
+               cfs_waitq_signal(&obd->obd_next_transno_waitq);
+               spin_lock(&obd->obd_recovery_task_lock);
+               LASSERT(obd->obd_recovering);
+               /* usually due to recovery abort */
+               if (!req->rq_export->exp_in_recovery) {
+                       spin_unlock(&obd->obd_recovery_task_lock);
+                       target_request_copy_put(req);
+                       RETURN(-ENOTCONN);
+               }
+               LASSERT(req->rq_export->exp_lock_replay_needed);
+               cfs_list_add_tail(&req->rq_list, &obd->obd_lock_replay_queue);
+               spin_unlock(&obd->obd_recovery_task_lock);
+               RETURN(0);
+       }
 
         /* CAVEAT EMPTOR: The incoming request message has been swabbed
          * (i.e. buflens etc are in my own byte order), but type-dependent
@@ -2304,14 +2304,14 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
         CDEBUG(D_HA, "Next recovery transno: "LPU64
                ", current: "LPU64", replaying\n",
                obd->obd_next_recovery_transno, transno);
-        cfs_spin_lock(&obd->obd_recovery_task_lock);
-        if (transno < obd->obd_next_recovery_transno) {
-                /* Processing the queue right now, don't re-add. */
-                LASSERT(cfs_list_empty(&req->rq_list));
-                cfs_spin_unlock(&obd->obd_recovery_task_lock);
-                RETURN(1);
-        }
-        cfs_spin_unlock(&obd->obd_recovery_task_lock);
+       spin_lock(&obd->obd_recovery_task_lock);
+       if (transno < obd->obd_next_recovery_transno) {
+               /* Processing the queue right now, don't re-add. */
+               LASSERT(cfs_list_empty(&req->rq_list));
+               spin_unlock(&obd->obd_recovery_task_lock);
+               RETURN(1);
+       }
+       spin_unlock(&obd->obd_recovery_task_lock);
 
         if (OBD_FAIL_CHECK(OBD_FAIL_TGT_REPLAY_DROP))
                 RETURN(0);
@@ -2330,7 +2330,7 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
         }
 
         /* XXX O(n^2) */
-        cfs_spin_lock(&obd->obd_recovery_task_lock);
+       spin_lock(&obd->obd_recovery_task_lock);
         LASSERT(obd->obd_recovering);
         cfs_list_for_each(tmp, &obd->obd_req_replay_queue) {
                 struct ptlrpc_request *reqiter =
@@ -2346,7 +2346,7 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
                              transno)) {
                         DEBUG_REQ(D_ERROR, req, "dropping replay: transno "
                                   "has been claimed by another client");
-                        cfs_spin_unlock(&obd->obd_recovery_task_lock);
+                       spin_unlock(&obd->obd_recovery_task_lock);
                         target_exp_dequeue_req_replay(req);
                         target_request_copy_put(req);
                         RETURN(0);
@@ -2357,9 +2357,9 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
                 cfs_list_add_tail(&req->rq_list, &obd->obd_req_replay_queue);
 
         obd->obd_requests_queued_for_recovery++;
-        cfs_spin_unlock(&obd->obd_recovery_task_lock);
-        cfs_waitq_signal(&obd->obd_next_transno_waitq);
-        RETURN(0);
+       spin_unlock(&obd->obd_recovery_task_lock);
+       cfs_waitq_signal(&obd->obd_next_transno_waitq);
+       RETURN(0);
 }
 EXPORT_SYMBOL(target_queue_recovery_request);
 
@@ -2413,10 +2413,10 @@ int target_pack_pool_reply(struct ptlrpc_request *req)
          */
         obd = req->rq_export->exp_obd;
 
-        cfs_read_lock(&obd->obd_pool_lock);
+       read_lock(&obd->obd_pool_lock);
         lustre_msg_set_slv(req->rq_repmsg, obd->obd_pool_slv);
         lustre_msg_set_limit(req->rq_repmsg, obd->obd_pool_limit);
-        cfs_read_unlock(&obd->obd_pool_lock);
+       read_unlock(&obd->obd_pool_lock);
 
         RETURN(0);
 }
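
obd_pool_lock is a reader/writer lock: target_pack_pool_reply() above only reads the pool values, so it takes the read side, while the pool-update paths further down take the write side. The unprefixed API on a hypothetical pool structure:

#include <linux/spinlock.h>
#include <linux/types.h>

struct pool_like {
        rwlock_t        pl_rwlock;      /* protects pl_slv and pl_limit */
        u64             pl_slv;
        u32             pl_limit;
};

static u64 pool_read_slv(struct pool_like *pool)
{
        u64 slv;

        read_lock(&pool->pl_rwlock);    /* was cfs_read_lock() */
        slv = pool->pl_slv;
        read_unlock(&pool->pl_rwlock);  /* was cfs_read_unlock() */
        return slv;
}

static void pool_update(struct pool_like *pool, u64 slv, u32 limit)
{
        write_lock(&pool->pl_rwlock);   /* was cfs_write_lock() */
        pool->pl_slv = slv;
        pool->pl_limit = limit;
        write_unlock(&pool->pl_rwlock); /* was cfs_write_unlock() */
}
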
@@ -2486,23 +2486,23 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
         rs->rs_export    = exp;
         rs->rs_opc       = lustre_msg_get_opc(req->rq_reqmsg);
 
-        cfs_spin_lock(&exp->exp_uncommitted_replies_lock);
-        CDEBUG(D_NET, "rs transno = "LPU64", last committed = "LPU64"\n",
-               rs->rs_transno, exp->exp_last_committed);
-        if (rs->rs_transno > exp->exp_last_committed) {
-                /* not committed already */
-                cfs_list_add_tail(&rs->rs_obd_list,
-                                  &exp->exp_uncommitted_replies);
-        }
-        cfs_spin_unlock (&exp->exp_uncommitted_replies_lock);
+       spin_lock(&exp->exp_uncommitted_replies_lock);
+       CDEBUG(D_NET, "rs transno = "LPU64", last committed = "LPU64"\n",
+              rs->rs_transno, exp->exp_last_committed);
+       if (rs->rs_transno > exp->exp_last_committed) {
+               /* not committed already */
+               cfs_list_add_tail(&rs->rs_obd_list,
+                                 &exp->exp_uncommitted_replies);
+       }
+       spin_unlock(&exp->exp_uncommitted_replies_lock);
 
-        cfs_spin_lock(&exp->exp_lock);
-        cfs_list_add_tail(&rs->rs_exp_list, &exp->exp_outstanding_replies);
-        cfs_spin_unlock(&exp->exp_lock);
+       spin_lock(&exp->exp_lock);
+       cfs_list_add_tail(&rs->rs_exp_list, &exp->exp_outstanding_replies);
+       spin_unlock(&exp->exp_lock);
 
-        netrc = target_send_reply_msg (req, rc, fail_id);
+       netrc = target_send_reply_msg(req, rc, fail_id);
 
-       cfs_spin_lock(&svcpt->scp_rep_lock);
+       spin_lock(&svcpt->scp_rep_lock);
 
        cfs_atomic_inc(&svcpt->scp_nreps_difficult);
 
@@ -2516,19 +2516,19 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
                 ptlrpc_rs_addref(rs);
         }
 
-        cfs_spin_lock(&rs->rs_lock);
-        if (rs->rs_transno <= exp->exp_last_committed ||
-            (!rs->rs_on_net && !rs->rs_no_ack) ||
-             cfs_list_empty(&rs->rs_exp_list) ||     /* completed already */
-             cfs_list_empty(&rs->rs_obd_list)) {
-                CDEBUG(D_HA, "Schedule reply immediately\n");
-                ptlrpc_dispatch_difficult_reply(rs);
-        } else {
+       spin_lock(&rs->rs_lock);
+       if (rs->rs_transno <= exp->exp_last_committed ||
+           (!rs->rs_on_net && !rs->rs_no_ack) ||
+           cfs_list_empty(&rs->rs_exp_list) ||     /* completed already */
+           cfs_list_empty(&rs->rs_obd_list)) {
+               CDEBUG(D_HA, "Schedule reply immediately\n");
+               ptlrpc_dispatch_difficult_reply(rs);
+       } else {
                cfs_list_add(&rs->rs_list, &svcpt->scp_rep_active);
                rs->rs_scheduled = 0;   /* allow notifier to schedule */
        }
-       cfs_spin_unlock(&rs->rs_lock);
-       cfs_spin_unlock(&svcpt->scp_rep_lock);
+       spin_unlock(&rs->rs_lock);
+       spin_unlock(&svcpt->scp_rep_lock);
        EXIT;
 }
 EXPORT_SYMBOL(target_send_reply);
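
target_send_reply() nests rs_lock inside scp_rep_lock, and the conversion preserves that order. Consistent nesting is what keeps the pair deadlock-free; schematically, with hypothetical types:

#include <linux/list.h>
#include <linux/spinlock.h>

struct svc_part {
        spinlock_t              sp_rep_lock;
        struct list_head        sp_rep_active;
};

struct reply_state {
        spinlock_t              rs_lock;
        struct list_head        rs_list;
        int                     rs_scheduled;
};

/* lock order is always sp_rep_lock, then rs_lock; taking them in the
 * opposite order anywhere would allow an ABBA deadlock */
static void reply_mark_active(struct svc_part *svcpt, struct reply_state *rs)
{
        spin_lock(&svcpt->sp_rep_lock);
        spin_lock(&rs->rs_lock);
        list_add(&rs->rs_list, &svcpt->sp_rep_active);
        rs->rs_scheduled = 0;
        spin_unlock(&rs->rs_lock);
        spin_unlock(&svcpt->sp_rep_lock);
}
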
@@ -2625,16 +2625,17 @@ EXPORT_SYMBOL(ldlm_errno2error);
 #if LUSTRE_TRACKS_LOCK_EXP_REFS
 void ldlm_dump_export_locks(struct obd_export *exp)
 {
-        cfs_spin_lock(&exp->exp_locks_list_guard);
-        if (!cfs_list_empty(&exp->exp_locks_list)) {
-            struct ldlm_lock *lock;
-
-            CERROR("dumping locks for export %p,"
-                   "ignore if the unmount doesn't hang\n", exp);
-            cfs_list_for_each_entry(lock, &exp->exp_locks_list, l_exp_refs_link)
-                LDLM_ERROR(lock, "lock:");
-        }
-        cfs_spin_unlock(&exp->exp_locks_list_guard);
+       spin_lock(&exp->exp_locks_list_guard);
+       if (!cfs_list_empty(&exp->exp_locks_list)) {
+               struct ldlm_lock *lock;
+
+               CERROR("dumping locks for export %p, "
+                      "ignore if the unmount doesn't hang\n", exp);
+               cfs_list_for_each_entry(lock, &exp->exp_locks_list,
+                                       l_exp_refs_link)
+                       LDLM_ERROR(lock, "lock:");
+       }
+       spin_unlock(&exp->exp_locks_list_guard);
 }
 #endif
 
index 8c78ae6..559bef6 100644
@@ -266,11 +266,11 @@ int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
                 RETURN(0);
         }
 
-        cfs_spin_lock(&ns->ns_lock);
-        rc = ldlm_lock_remove_from_lru_nolock(lock);
-        cfs_spin_unlock(&ns->ns_lock);
-        EXIT;
-        return rc;
+       spin_lock(&ns->ns_lock);
+       rc = ldlm_lock_remove_from_lru_nolock(lock);
+       spin_unlock(&ns->ns_lock);
+       EXIT;
+       return rc;
 }
 
 void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
@@ -287,13 +287,13 @@ void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
 
 void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
 {
-        struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
+       struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
 
-        ENTRY;
-        cfs_spin_lock(&ns->ns_lock);
-        ldlm_lock_add_to_lru_nolock(lock);
-        cfs_spin_unlock(&ns->ns_lock);
-        EXIT;
+       ENTRY;
+       spin_lock(&ns->ns_lock);
+       ldlm_lock_add_to_lru_nolock(lock);
+       spin_unlock(&ns->ns_lock);
+       EXIT;
 }
 
 void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
@@ -307,13 +307,13 @@ void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
                 return;
         }
 
-        cfs_spin_lock(&ns->ns_lock);
-        if (!cfs_list_empty(&lock->l_lru)) {
-                ldlm_lock_remove_from_lru_nolock(lock);
-                ldlm_lock_add_to_lru_nolock(lock);
-        }
-        cfs_spin_unlock(&ns->ns_lock);
-        EXIT;
+       spin_lock(&ns->ns_lock);
+       if (!cfs_list_empty(&lock->l_lru)) {
+               ldlm_lock_remove_from_lru_nolock(lock);
+               ldlm_lock_add_to_lru_nolock(lock);
+       }
+       spin_unlock(&ns->ns_lock);
+       EXIT;
 }
 
 /* This used to have a 'strict' flag, which recovery would use to mark an
@@ -432,7 +432,7 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
         if (lock == NULL)
                 RETURN(NULL);
 
-        cfs_spin_lock_init(&lock->l_lock);
+       spin_lock_init(&lock->l_lock);
         lock->l_resource = resource;
         lu_ref_add(&resource->lr_reference, "lock", lock);
 
@@ -506,7 +506,7 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
          * lock->l_lock, and are taken in the memory address order to avoid
          * dead-locks.
          */
-        cfs_spin_lock(&lock->l_lock);
+       spin_lock(&lock->l_lock);
         oldres = lock->l_resource;
         if (oldres < newres) {
                 lock_res(oldres);
index 52df532..346a6e7 100644
@@ -62,7 +62,7 @@ CFS_MODULE_PARM(ldlm_cpts, "s", charp, 0444,
 
 extern cfs_mem_cache_t *ldlm_resource_slab;
 extern cfs_mem_cache_t *ldlm_lock_slab;
-static cfs_mutex_t      ldlm_ref_mutex;
+static struct mutex    ldlm_ref_mutex;
 static int ldlm_refcount;
 
 struct ldlm_cb_async_args {
@@ -93,7 +93,7 @@ static inline unsigned int ldlm_get_rq_timeout(void)
 #define ELT_TERMINATE 2
 
 struct ldlm_bl_pool {
-        cfs_spinlock_t          blp_lock;
+       spinlock_t              blp_lock;
 
         /*
          * blp_prio_list is used for callbacks that should be handled
@@ -109,7 +109,7 @@ struct ldlm_bl_pool {
         cfs_list_t              blp_list;
 
         cfs_waitq_t             blp_waitq;
-        cfs_completion_t        blp_comp;
+       struct completion        blp_comp;
         cfs_atomic_t            blp_num_threads;
         cfs_atomic_t            blp_busy_threads;
         int                     blp_min_threads;
@@ -123,7 +123,7 @@ struct ldlm_bl_work_item {
         struct ldlm_lock       *blwi_lock;
         cfs_list_t              blwi_head;
         int                     blwi_count;
-        cfs_completion_t        blwi_comp;
+       struct completion        blwi_comp;
         int                     blwi_mode;
         int                     blwi_mem_pressure;
 };
@@ -131,7 +131,7 @@ struct ldlm_bl_work_item {
 #if defined(HAVE_SERVER_SUPPORT) && defined(__KERNEL__)
 
 /* w_l_spinlock protects both waiting_locks_list and expired_lock_thread */
-static cfs_spinlock_t waiting_locks_spinlock;   /* BH lock (timer) */
+static spinlock_t waiting_locks_spinlock;   /* BH lock (timer) */
 static cfs_list_t waiting_locks_list;
 static cfs_timer_t waiting_locks_timer;
 
@@ -144,14 +144,14 @@ static struct expired_lock_thread {
 
 static inline int have_expired_locks(void)
 {
-        int need_to_run;
+       int need_to_run;
 
-        ENTRY;
-        cfs_spin_lock_bh(&waiting_locks_spinlock);
-        need_to_run = !cfs_list_empty(&expired_lock_thread.elt_expired_locks);
-        cfs_spin_unlock_bh(&waiting_locks_spinlock);
+       ENTRY;
+       spin_lock_bh(&waiting_locks_spinlock);
+       need_to_run = !cfs_list_empty(&expired_lock_thread.elt_expired_locks);
+       spin_unlock_bh(&waiting_locks_spinlock);
 
-        RETURN(need_to_run);
+       RETURN(need_to_run);
 }
 
 static int expired_lock_main(void *arg)
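
waiting_locks_spinlock is annotated "BH lock (timer)" because waiting_locks_callback() runs from the timer softirq. Process context must therefore use the _bh variants so the softirq cannot interrupt a holder on the same CPU and self-deadlock; the callback itself may use plain spin_lock() since softirqs do not preempt one another on a given CPU. A minimal sketch of that discipline, with hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>

static LIST_HEAD(pending_list);
static DEFINE_SPINLOCK(pending_lock);   /* BH lock: shared with a timer */

/* timer (softirq) context: plain spin_lock() is sufficient here */
static void pending_timer_cb(unsigned long unused)
{
        spin_lock(&pending_lock);
        /* ... scan pending_list for expired entries ... */
        spin_unlock(&pending_lock);
}

/* process context: disable bottom halves while holding the lock */
static void pending_add(struct list_head *item)
{
        spin_lock_bh(&pending_lock);    /* was cfs_spin_lock_bh() */
        list_add_tail(item, &pending_list);
        spin_unlock_bh(&pending_lock);  /* was cfs_spin_unlock_bh() */
}
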
@@ -172,19 +172,19 @@ static int expired_lock_main(void *arg)
                              expired_lock_thread.elt_state == ELT_TERMINATE,
                              &lwi);
 
-                cfs_spin_lock_bh(&waiting_locks_spinlock);
-                if (expired_lock_thread.elt_dump) {
-                        struct libcfs_debug_msg_data msgdata = {
-                                .msg_file = __FILE__,
-                                .msg_fn = "waiting_locks_callback",
-                                .msg_line = expired_lock_thread.elt_dump };
-                        cfs_spin_unlock_bh(&waiting_locks_spinlock);
+               spin_lock_bh(&waiting_locks_spinlock);
+               if (expired_lock_thread.elt_dump) {
+                       struct libcfs_debug_msg_data msgdata = {
+                               .msg_file = __FILE__,
+                               .msg_fn = "waiting_locks_callback",
+                               .msg_line = expired_lock_thread.elt_dump };
+                       spin_unlock_bh(&waiting_locks_spinlock);
 
-                        /* from waiting_locks_callback, but not in timer */
-                        libcfs_debug_dumplog();
-                        libcfs_run_lbug_upcall(&msgdata);
+                       /* from waiting_locks_callback, but not in timer */
+                       libcfs_debug_dumplog();
+                       libcfs_run_lbug_upcall(&msgdata);
 
-                        cfs_spin_lock_bh(&waiting_locks_spinlock);
+                       spin_lock_bh(&waiting_locks_spinlock);
                         expired_lock_thread.elt_dump = 0;
                 }
 
@@ -198,7 +198,7 @@ static int expired_lock_main(void *arg)
                                           l_pending_chain);
                         if ((void *)lock < LP_POISON + CFS_PAGE_SIZE &&
                             (void *)lock >= LP_POISON) {
-                                cfs_spin_unlock_bh(&waiting_locks_spinlock);
+                               spin_unlock_bh(&waiting_locks_spinlock);
                                 CERROR("free lock on elt list %p\n", lock);
                                 LBUG();
                         }
@@ -222,20 +222,20 @@ static int expired_lock_main(void *arg)
                                LDLM_LOCK_RELEASE(lock);
                                continue;
                        }
-                        export = class_export_lock_get(lock->l_export, lock);
-                        cfs_spin_unlock_bh(&waiting_locks_spinlock);
+                       export = class_export_lock_get(lock->l_export, lock);
+                       spin_unlock_bh(&waiting_locks_spinlock);
 
-                        do_dump++;
-                        class_fail_export(export);
-                        class_export_lock_put(export, lock);
+                       do_dump++;
+                       class_fail_export(export);
+                       class_export_lock_put(export, lock);
 
-                        /* release extra ref grabbed by ldlm_add_waiting_lock()
-                         * or ldlm_failed_ast() */
-                        LDLM_LOCK_RELEASE(lock);
+                       /* release extra ref grabbed by ldlm_add_waiting_lock()
+                        * or ldlm_failed_ast() */
+                       LDLM_LOCK_RELEASE(lock);
 
-                        cfs_spin_lock_bh(&waiting_locks_spinlock);
-                }
-                cfs_spin_unlock_bh(&waiting_locks_spinlock);
+                       spin_lock_bh(&waiting_locks_spinlock);
+               }
+               spin_unlock_bh(&waiting_locks_spinlock);
 
                 if (do_dump && obd_dump_on_eviction) {
                         CERROR("dump the log upon eviction\n");
@@ -260,24 +260,24 @@ static int __ldlm_add_waiting_lock(struct ldlm_lock *lock, int seconds);
  */
 static int ldlm_lock_busy(struct ldlm_lock *lock)
 {
-        struct ptlrpc_request *req;
-        int match = 0;
-        ENTRY;
+       struct ptlrpc_request *req;
+       int match = 0;
+       ENTRY;
 
-        if (lock->l_export == NULL)
-                return 0;
+       if (lock->l_export == NULL)
+               return 0;
 
-        cfs_spin_lock_bh(&lock->l_export->exp_rpc_lock);
-        cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
-                                rq_exp_list) {
-                if (req->rq_ops->hpreq_lock_match) {
-                        match = req->rq_ops->hpreq_lock_match(req, lock);
-                        if (match)
-                                break;
-                }
-        }
-        cfs_spin_unlock_bh(&lock->l_export->exp_rpc_lock);
-        RETURN(match);
+       spin_lock_bh(&lock->l_export->exp_rpc_lock);
+       cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
+                               rq_exp_list) {
+               if (req->rq_ops->hpreq_lock_match) {
+                       match = req->rq_ops->hpreq_lock_match(req, lock);
+                       if (match)
+                               break;
+               }
+       }
+       spin_unlock_bh(&lock->l_export->exp_rpc_lock);
+       RETURN(match);
 }
 
 /* This is called from within a timer interrupt and cannot schedule */
@@ -286,7 +286,7 @@ static void waiting_locks_callback(unsigned long unused)
        struct ldlm_lock        *lock;
        int                     need_dump = 0;
 
-        cfs_spin_lock_bh(&waiting_locks_spinlock);
+       spin_lock_bh(&waiting_locks_spinlock);
         while (!cfs_list_empty(&waiting_locks_list)) {
                 lock = cfs_list_entry(waiting_locks_list.next, struct ldlm_lock,
                                       l_pending_chain);
@@ -352,11 +352,11 @@ static void waiting_locks_callback(unsigned long unused)
 
                         LDLM_LOCK_GET(lock);
 
-                        cfs_spin_unlock_bh(&waiting_locks_spinlock);
-                        LDLM_DEBUG(lock, "prolong the busy lock");
-                        ldlm_refresh_waiting_lock(lock,
-                                                  ldlm_get_enq_timeout(lock));
-                        cfs_spin_lock_bh(&waiting_locks_spinlock);
+                       spin_unlock_bh(&waiting_locks_spinlock);
+                       LDLM_DEBUG(lock, "prolong the busy lock");
+                       ldlm_refresh_waiting_lock(lock,
+                                                 ldlm_get_enq_timeout(lock));
+                       spin_lock_bh(&waiting_locks_spinlock);
 
                         if (!cont) {
                                 LDLM_LOCK_RELEASE(lock);
@@ -400,7 +400,7 @@ static void waiting_locks_callback(unsigned long unused)
                 timeout_rounded = (cfs_time_t)round_timeout(lock->l_callback_timeout);
                 cfs_timer_arm(&waiting_locks_timer, timeout_rounded);
         }
-        cfs_spin_unlock_bh(&waiting_locks_spinlock);
+       spin_unlock_bh(&waiting_locks_spinlock);
 }
 
 /*
@@ -454,10 +454,10 @@ static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
 
        LASSERT(!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK));
 
-        cfs_spin_lock_bh(&waiting_locks_spinlock);
-        if (lock->l_destroyed) {
-                static cfs_time_t next;
-                cfs_spin_unlock_bh(&waiting_locks_spinlock);
+       spin_lock_bh(&waiting_locks_spinlock);
+       if (lock->l_destroyed) {
+               static cfs_time_t next;
+               spin_unlock_bh(&waiting_locks_spinlock);
                 LDLM_ERROR(lock, "not waiting on destroyed lock (bug 5653)");
                 if (cfs_time_after(cfs_time_current(), next)) {
                         next = cfs_time_shift(14400);
@@ -472,20 +472,20 @@ static int ldlm_add_waiting_lock(struct ldlm_lock *lock)
                  * waiting list */
                 LDLM_LOCK_GET(lock);
         }
-        cfs_spin_unlock_bh(&waiting_locks_spinlock);
+       spin_unlock_bh(&waiting_locks_spinlock);
 
-        if (ret) {
-                cfs_spin_lock_bh(&lock->l_export->exp_bl_list_lock);
-                if (cfs_list_empty(&lock->l_exp_list))
-                        cfs_list_add(&lock->l_exp_list,
-                                     &lock->l_export->exp_bl_list);
-                cfs_spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
-        }
+       if (ret) {
+               spin_lock_bh(&lock->l_export->exp_bl_list_lock);
+               if (cfs_list_empty(&lock->l_exp_list))
+                       cfs_list_add(&lock->l_exp_list,
+                                    &lock->l_export->exp_bl_list);
+               spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
+       }
 
-        LDLM_DEBUG(lock, "%sadding to wait list(timeout: %d, AT: %s)",
-                   ret == 0 ? "not re-" : "", timeout,
-                   AT_OFF ? "off" : "on");
-        return ret;
+       LDLM_DEBUG(lock, "%sadding to wait list (timeout: %d, AT: %s)",
+                  ret == 0 ? "not re-" : "", timeout,
+                  AT_OFF ? "off" : "on");
+       return ret;
 }
 
 /*
@@ -533,14 +533,14 @@ int ldlm_del_waiting_lock(struct ldlm_lock *lock)
                 return 0;
         }
 
-        cfs_spin_lock_bh(&waiting_locks_spinlock);
-        ret = __ldlm_del_waiting_lock(lock);
-        cfs_spin_unlock_bh(&waiting_locks_spinlock);
+       spin_lock_bh(&waiting_locks_spinlock);
+       ret = __ldlm_del_waiting_lock(lock);
+       spin_unlock_bh(&waiting_locks_spinlock);
 
-        /* remove the lock out of export blocking list */
-        cfs_spin_lock_bh(&lock->l_export->exp_bl_list_lock);
-        cfs_list_del_init(&lock->l_exp_list);
-        cfs_spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
+       /* remove the lock out of export blocking list */
+       spin_lock_bh(&lock->l_export->exp_bl_list_lock);
+       cfs_list_del_init(&lock->l_exp_list);
+       spin_unlock_bh(&lock->l_export->exp_bl_list_lock);
 
         if (ret) {
                 /* release lock ref if it has indeed been removed
@@ -560,28 +560,28 @@ EXPORT_SYMBOL(ldlm_del_waiting_lock);
  */
 int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
 {
-        if (lock->l_export == NULL) {
-                /* We don't have a "waiting locks list" on clients. */
-                LDLM_DEBUG(lock, "client lock: no-op");
-                return 0;
-        }
+       if (lock->l_export == NULL) {
+               /* We don't have a "waiting locks list" on clients. */
+               LDLM_DEBUG(lock, "client lock: no-op");
+               return 0;
+       }
 
-        cfs_spin_lock_bh(&waiting_locks_spinlock);
+       spin_lock_bh(&waiting_locks_spinlock);
 
-        if (cfs_list_empty(&lock->l_pending_chain)) {
-                cfs_spin_unlock_bh(&waiting_locks_spinlock);
-                LDLM_DEBUG(lock, "wasn't waiting");
-                return 0;
-        }
+       if (cfs_list_empty(&lock->l_pending_chain)) {
+               spin_unlock_bh(&waiting_locks_spinlock);
+               LDLM_DEBUG(lock, "wasn't waiting");
+               return 0;
+       }
 
-        /* we remove/add the lock to the waiting list, so no needs to
-         * release/take a lock reference */
-        __ldlm_del_waiting_lock(lock);
-        __ldlm_add_waiting_lock(lock, timeout);
-        cfs_spin_unlock_bh(&waiting_locks_spinlock);
+       /* we remove/add the lock to the waiting list, so there is no need
+        * to release/take a lock reference */
+       __ldlm_del_waiting_lock(lock);
+       __ldlm_add_waiting_lock(lock, timeout);
+       spin_unlock_bh(&waiting_locks_spinlock);
 
-        LDLM_DEBUG(lock, "refreshed");
-        return 1;
+       LDLM_DEBUG(lock, "refreshed");
+       return 1;
 }
 EXPORT_SYMBOL(ldlm_refresh_waiting_lock);
 
@@ -621,17 +621,17 @@ static void ldlm_failed_ast(struct ldlm_lock *lock, int rc,
         if (obd_dump_on_timeout)
                 libcfs_debug_dumplog();
 #ifdef __KERNEL__
-        cfs_spin_lock_bh(&waiting_locks_spinlock);
-        if (__ldlm_del_waiting_lock(lock) == 0)
-                /* the lock was not in any list, grab an extra ref before adding
-                 * the lock to the expired list */
-                LDLM_LOCK_GET(lock);
-        cfs_list_add(&lock->l_pending_chain,
-                     &expired_lock_thread.elt_expired_locks);
-        cfs_waitq_signal(&expired_lock_thread.elt_waitq);
-        cfs_spin_unlock_bh(&waiting_locks_spinlock);
+       spin_lock_bh(&waiting_locks_spinlock);
+       if (__ldlm_del_waiting_lock(lock) == 0)
+               /* the lock was not in any list, grab an extra ref before adding
+                * the lock to the expired list */
+               LDLM_LOCK_GET(lock);
+       cfs_list_add(&lock->l_pending_chain,
+                    &expired_lock_thread.elt_expired_locks);
+       cfs_waitq_signal(&expired_lock_thread.elt_waitq);
+       spin_unlock_bh(&waiting_locks_spinlock);
 #else
-        class_fail_export(lock->l_export);
+       class_fail_export(lock->l_export);
 #endif
 }
 
@@ -780,7 +780,7 @@ static void ldlm_lock_reorder_req(struct ldlm_lock *lock)
                 RETURN_EXIT;
         }
 
-        cfs_spin_lock_bh(&lock->l_export->exp_rpc_lock);
+       spin_lock_bh(&lock->l_export->exp_rpc_lock);
         cfs_list_for_each_entry(req, &lock->l_export->exp_hp_rpcs,
                                 rq_exp_list) {
                 /* Do not process requests that were not yet added to there
@@ -791,8 +791,8 @@ static void ldlm_lock_reorder_req(struct ldlm_lock *lock)
                     req->rq_ops->hpreq_lock_match(req, lock))
                         ptlrpc_hpreq_reorder(req);
         }
-        cfs_spin_unlock_bh(&lock->l_export->exp_rpc_lock);
-        EXIT;
+       spin_unlock_bh(&lock->l_export->exp_rpc_lock);
+       EXIT;
 }
 
 /*
@@ -1813,27 +1813,28 @@ static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
 #ifdef __KERNEL__
 static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi, int mode)
 {
-        struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
-        ENTRY;
+       struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
+       ENTRY;
 
-        cfs_spin_lock(&blp->blp_lock);
-        if (blwi->blwi_lock && blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
-                /* add LDLM_FL_DISCARD_DATA requests to the priority list */
-                cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
-        } else {
-                /* other blocking callbacks are added to the regular list */
-                cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_list);
-        }
-        cfs_spin_unlock(&blp->blp_lock);
+       spin_lock(&blp->blp_lock);
+       if (blwi->blwi_lock &&
+           blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
+               /* add LDLM_FL_DISCARD_DATA requests to the priority list */
+               cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
+       } else {
+               /* other blocking callbacks are added to the regular list */
+               cfs_list_add_tail(&blwi->blwi_entry, &blp->blp_list);
+       }
+       spin_unlock(&blp->blp_lock);
 
-        cfs_waitq_signal(&blp->blp_waitq);
+       cfs_waitq_signal(&blp->blp_waitq);
 
-        /* can not use blwi->blwi_mode as blwi could be already freed in
-           LDLM_ASYNC mode */
-        if (mode == LDLM_SYNC)
-                cfs_wait_for_completion(&blwi->blwi_comp);
+       /* cannot use blwi->blwi_mode as blwi could already be freed in
+        * LDLM_ASYNC mode */
+       if (mode == LDLM_SYNC)
+               wait_for_completion(&blwi->blwi_comp);
 
-        RETURN(0);
+       RETURN(0);
 }
 
 static inline void init_blwi(struct ldlm_bl_work_item *blwi,
@@ -1843,7 +1844,7 @@ static inline void init_blwi(struct ldlm_bl_work_item *blwi,
                              struct ldlm_lock *lock,
                              int mode)
 {
-        cfs_init_completion(&blwi->blwi_comp);
+       init_completion(&blwi->blwi_comp);
         CFS_INIT_LIST_HEAD(&blwi->blwi_head);
 
         if (cfs_memory_pressure_get())
@@ -2386,10 +2387,10 @@ EXPORT_SYMBOL(ldlm_revoke_export_locks);
 #ifdef __KERNEL__
 static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
 {
-        struct ldlm_bl_work_item *blwi = NULL;
-        static unsigned int num_bl = 0;
+       struct ldlm_bl_work_item *blwi = NULL;
+       static unsigned int num_bl = 0;
 
-        cfs_spin_lock(&blp->blp_lock);
+       spin_lock(&blp->blp_lock);
         /* process a request from the blp_list at least every blp_num_threads */
         if (!cfs_list_empty(&blp->blp_list) &&
             (cfs_list_empty(&blp->blp_prio_list) || num_bl == 0))
@@ -2406,36 +2407,36 @@ static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
                         num_bl = 0;
                 cfs_list_del(&blwi->blwi_entry);
         }
-        cfs_spin_unlock(&blp->blp_lock);
+       spin_unlock(&blp->blp_lock);
 
-        return blwi;
+       return blwi;
 }
 
 /* This only contains temporary data until the thread starts */
 struct ldlm_bl_thread_data {
-        char                    bltd_name[CFS_CURPROC_COMM_MAX];
-        struct ldlm_bl_pool     *bltd_blp;
-        cfs_completion_t        bltd_comp;
-        int                     bltd_num;
+       char                    bltd_name[CFS_CURPROC_COMM_MAX];
+       struct ldlm_bl_pool     *bltd_blp;
+       struct completion       bltd_comp;
+       int                     bltd_num;
 };
 
 static int ldlm_bl_thread_main(void *arg);
 
 static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
 {
-        struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
-        int rc;
+       struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
+       int rc;
 
-        cfs_init_completion(&bltd.bltd_comp);
-        rc = cfs_create_thread(ldlm_bl_thread_main, &bltd, 0);
-        if (rc < 0) {
-                CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %d\n",
-                       cfs_atomic_read(&blp->blp_num_threads), rc);
-                return rc;
-        }
-        cfs_wait_for_completion(&bltd.bltd_comp);
+       init_completion(&bltd.bltd_comp);
+       rc = cfs_create_thread(ldlm_bl_thread_main, &bltd, 0);
+       if (rc < 0) {
+               CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %d\n",
+                      cfs_atomic_read(&blp->blp_num_threads), rc);
+               return rc;
+       }
+       wait_for_completion(&bltd.bltd_comp);
 
-        return 0;
+       return 0;
 }
 
 static int ldlm_bl_thread_main(void *arg)
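
ldlm_bl_thread_start() hands the new thread an ldlm_bl_thread_data that lives on the caller's stack, which is why ldlm_bl_thread_main() copies what it needs and only then calls complete(): after that point the parent may return and the pointer is dead. The same handoff as a standalone sketch (hypothetical names, kthread_run() in place of cfs_create_thread()):

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kthread.h>

struct start_data {
        struct completion       sd_comp;
        int                     sd_num;
};

static int child_main(void *arg)
{
        struct start_data *sd = arg;
        int num = sd->sd_num;   /* copy out while the parent still waits */

        complete(&sd->sd_comp); /* sd is on the parent's stack:
                                 * it must not be touched after this */
        /* ... run, using only the copied 'num' ... */
        return 0;
}

static int child_start(int num)
{
        struct start_data sd = { .sd_num = num };
        struct task_struct *task;

        init_completion(&sd.sd_comp);
        task = kthread_run(child_main, &sd, "child_%02d", num);
        if (IS_ERR(task))
                return PTR_ERR(task);
        wait_for_completion(&sd.sd_comp);
        return 0;
}
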
@@ -2456,7 +2457,7 @@ static int ldlm_bl_thread_main(void *arg)
                         "ldlm_bl_%02d", bltd->bltd_num);
                 cfs_daemonize(bltd->bltd_name);
 
-                cfs_complete(&bltd->bltd_comp);
+               complete(&bltd->bltd_comp);
                 /* cannot use bltd after this, it is only on caller's stack */
         }
 
@@ -2511,12 +2512,12 @@ static int ldlm_bl_thread_main(void *arg)
                 if (blwi->blwi_mode == LDLM_ASYNC)
                         OBD_FREE(blwi, sizeof(*blwi));
                 else
-                        cfs_complete(&blwi->blwi_comp);
+                       complete(&blwi->blwi_comp);
         }
 
         cfs_atomic_dec(&blp->blp_busy_threads);
         cfs_atomic_dec(&blp->blp_num_threads);
-        cfs_complete(&blp->blp_comp);
+       complete(&blp->blp_comp);
         RETURN(0);
 }
 
@@ -2529,13 +2530,13 @@ int ldlm_get_ref(void)
 {
         int rc = 0;
         ENTRY;
-        cfs_mutex_lock(&ldlm_ref_mutex);
+       mutex_lock(&ldlm_ref_mutex);
         if (++ldlm_refcount == 1) {
                 rc = ldlm_setup();
                 if (rc)
                         ldlm_refcount--;
         }
-        cfs_mutex_unlock(&ldlm_ref_mutex);
+       mutex_unlock(&ldlm_ref_mutex);
 
         RETURN(rc);
 }
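
ldlm_get_ref()/ldlm_put_ref() use ldlm_ref_mutex to make lazy setup and final teardown mutually exclusive: only the transition to or from a refcount of one runs the heavy work. With the unprefixed API this reduces to the following sketch (hypothetical setup callback; DEFINE_MUTEX replaces a runtime cfs_mutex_init() for statics):

#include <linux/mutex.h>

static DEFINE_MUTEX(ref_mutex);
static int refcount;

static int get_ref(int (*setup)(void))
{
        int rc = 0;

        mutex_lock(&ref_mutex);         /* was cfs_mutex_lock() */
        if (++refcount == 1) {
                rc = setup();           /* first user performs the setup */
                if (rc)
                        refcount--;
        }
        mutex_unlock(&ref_mutex);       /* was cfs_mutex_unlock() */
        return rc;
}
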
@@ -2544,7 +2545,7 @@ EXPORT_SYMBOL(ldlm_get_ref);
 void ldlm_put_ref(void)
 {
         ENTRY;
-        cfs_mutex_lock(&ldlm_ref_mutex);
+       mutex_lock(&ldlm_ref_mutex);
         if (ldlm_refcount == 1) {
                 int rc = ldlm_cleanup();
                 if (rc)
@@ -2554,7 +2555,7 @@ void ldlm_put_ref(void)
         } else {
                 ldlm_refcount--;
         }
-        cfs_mutex_unlock(&ldlm_ref_mutex);
+       mutex_unlock(&ldlm_ref_mutex);
 
         EXIT;
 }
@@ -2768,9 +2769,9 @@ static int ldlm_setup(void)
        OBD_ALLOC(blp, sizeof(*blp));
        if (blp == NULL)
                GOTO(out, rc = -ENOMEM);
-        ldlm_state->ldlm_bl_pool = blp;
+       ldlm_state->ldlm_bl_pool = blp;
 
-        cfs_spin_lock_init(&blp->blp_lock);
+       spin_lock_init(&blp->blp_lock);
         CFS_INIT_LIST_HEAD(&blp->blp_list);
         CFS_INIT_LIST_HEAD(&blp->blp_prio_list);
         cfs_waitq_init(&blp->blp_waitq);
@@ -2799,7 +2800,7 @@ static int ldlm_setup(void)
         cfs_waitq_init(&expired_lock_thread.elt_waitq);
 
         CFS_INIT_LIST_HEAD(&waiting_locks_list);
-        cfs_spin_lock_init(&waiting_locks_spinlock);
+       spin_lock_init(&waiting_locks_spinlock);
         cfs_timer_init(&waiting_locks_timer, waiting_locks_callback, 0);
 
         rc = cfs_create_thread(expired_lock_main, NULL, CFS_DAEMON_FLAGS);
@@ -2846,14 +2847,14 @@ static int ldlm_cleanup(void)
                while (cfs_atomic_read(&blp->blp_num_threads) > 0) {
                        struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };
 
-                       cfs_init_completion(&blp->blp_comp);
+                       init_completion(&blp->blp_comp);
 
-                       cfs_spin_lock(&blp->blp_lock);
+                       spin_lock(&blp->blp_lock);
                        cfs_list_add_tail(&blwi.blwi_entry, &blp->blp_list);
                        cfs_waitq_signal(&blp->blp_waitq);
-                       cfs_spin_unlock(&blp->blp_lock);
+                       spin_unlock(&blp->blp_lock);
 
-                       cfs_wait_for_completion(&blp->blp_comp);
+                       wait_for_completion(&blp->blp_comp);
                }
 
                OBD_FREE(blp, sizeof(*blp));
@@ -2888,9 +2889,9 @@ static int ldlm_cleanup(void)
 
 int ldlm_init(void)
 {
-        cfs_mutex_init(&ldlm_ref_mutex);
-        cfs_mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
-        cfs_mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
+       mutex_init(&ldlm_ref_mutex);
+       mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
+       mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
         ldlm_resource_slab = cfs_mem_cache_create("ldlm_resources",
                                                sizeof(struct ldlm_resource), 0,
                                                CFS_SLAB_HWCACHE_ALIGN);
index 565c6f7..beba888 100644
@@ -329,9 +329,9 @@ static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
          */
         obd = ldlm_pl2ns(pl)->ns_obd;
         LASSERT(obd != NULL);
-        cfs_write_lock(&obd->obd_pool_lock);
+       write_lock(&obd->obd_pool_lock);
         obd->obd_pool_slv = pl->pl_server_lock_volume;
-        cfs_write_unlock(&obd->obd_pool_lock);
+       write_unlock(&obd->obd_pool_lock);
 }
 
 /**
@@ -348,12 +348,12 @@ static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
         if (recalc_interval_sec < pl->pl_recalc_period)
                 RETURN(0);
 
-        cfs_spin_lock(&pl->pl_lock);
-        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
-        if (recalc_interval_sec < pl->pl_recalc_period) {
-                cfs_spin_unlock(&pl->pl_lock);
-                RETURN(0);
-        }
+       spin_lock(&pl->pl_lock);
+       recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+       if (recalc_interval_sec < pl->pl_recalc_period) {
+               spin_unlock(&pl->pl_lock);
+               RETURN(0);
+       }
         /*
          * Recalc SLV after last period. This should be done
          * _before_ recalculating new grant plan.
@@ -373,8 +373,8 @@ static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
         pl->pl_recalc_time = cfs_time_current_sec();
         lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                             recalc_interval_sec);
-        cfs_spin_unlock(&pl->pl_lock);
-        RETURN(0);
+       spin_unlock(&pl->pl_lock);
+       RETURN(0);
 }
 
 /**
@@ -403,7 +403,7 @@ static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
         if (cfs_atomic_read(&pl->pl_granted) == 0)
                 RETURN(0);
 
-        cfs_spin_lock(&pl->pl_lock);
+       spin_lock(&pl->pl_lock);
 
         /*
          * We want shrinker to possibly cause cancellation of @nr locks from
@@ -428,13 +428,13 @@ static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
          * Make sure that pool informed obd of last SLV changes.
          */
         ldlm_srv_pool_push_slv(pl);
-        cfs_spin_unlock(&pl->pl_lock);
+       spin_unlock(&pl->pl_lock);
 
-        /*
-         * We did not really free any memory here so far, it only will be
-         * freed later may be, so that we return 0 to not confuse VM.
-         */
-        return 0;
+       /*
+        * We have not really freed any memory here so far; it may only
+        * be freed later, so we return 0 to avoid confusing the VM.
+        */
+       return 0;
 }
 
 /**
@@ -447,9 +447,9 @@ static int ldlm_srv_pool_setup(struct ldlm_pool *pl, int limit)
         obd = ldlm_pl2ns(pl)->ns_obd;
         LASSERT(obd != NULL && obd != LP_POISON);
         LASSERT(obd->obd_type != LP_POISON);
-        cfs_write_lock(&obd->obd_pool_lock);
+       write_lock(&obd->obd_pool_lock);
         obd->obd_pool_limit = limit;
-        cfs_write_unlock(&obd->obd_pool_lock);
+       write_unlock(&obd->obd_pool_lock);
 
         ldlm_pool_set_limit(pl, limit);
         return 0;
@@ -468,10 +468,10 @@ static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
          */
         obd = ldlm_pl2ns(pl)->ns_obd;
         LASSERT(obd != NULL);
-        cfs_read_lock(&obd->obd_pool_lock);
+       read_lock(&obd->obd_pool_lock);
         pl->pl_server_lock_volume = obd->obd_pool_slv;
         ldlm_pool_set_limit(pl, obd->obd_pool_limit);
-        cfs_read_unlock(&obd->obd_pool_lock);
+       read_unlock(&obd->obd_pool_lock);
 }
 
 /**
@@ -486,13 +486,13 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
         if (recalc_interval_sec < pl->pl_recalc_period)
                 RETURN(0);
 
-        cfs_spin_lock(&pl->pl_lock);
-        /*
-         * Check if we need to recalc lists now.
-         */
-        recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
-        if (recalc_interval_sec < pl->pl_recalc_period) {
-                cfs_spin_unlock(&pl->pl_lock);
+       spin_lock(&pl->pl_lock);
+       /*
+        * Check if we need to recalc lists now.
+        */
+       recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
+       if (recalc_interval_sec < pl->pl_recalc_period) {
+               spin_unlock(&pl->pl_lock);
                 RETURN(0);
         }
 
@@ -504,7 +504,7 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
         pl->pl_recalc_time = cfs_time_current_sec();
         lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                             recalc_interval_sec);
-        cfs_spin_unlock(&pl->pl_lock);
+       spin_unlock(&pl->pl_lock);
 
         /*
          * Do not cancel locks in case lru resize is disabled for this ns.
@@ -546,10 +546,10 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
          */
         ldlm_cli_pool_pop_slv(pl);
 
-        cfs_spin_lock(&ns->ns_lock);
-        unused = ns->ns_nr_unused;
-        cfs_spin_unlock(&ns->ns_lock);
-        
+       spin_lock(&ns->ns_lock);
+       unused = ns->ns_nr_unused;
+       spin_unlock(&ns->ns_lock);
+
         if (nr) {
                 canceled = ldlm_cancel_lru(ns, nr, LDLM_ASYNC,
                                            LDLM_CANCEL_SHRINK);
@@ -588,7 +588,7 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
         if (recalc_interval_sec <= 0)
                 goto recalc;
 
-        cfs_spin_lock(&pl->pl_lock);
+       spin_lock(&pl->pl_lock);
         recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
         if (recalc_interval_sec > 0) {
                 /*
@@ -602,7 +602,7 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
                 cfs_atomic_set(&pl->pl_grant_rate, 0);
                 cfs_atomic_set(&pl->pl_cancel_rate, 0);
         }
-        cfs_spin_unlock(&pl->pl_lock);
+       spin_unlock(&pl->pl_lock);
 
  recalc:
         if (pl->pl_ops->po_recalc != NULL) {
@@ -666,7 +666,7 @@ static int lprocfs_rd_pool_state(char *page, char **start, off_t off,
         __u64 slv, clv;
         __u32 limit;
 
-        cfs_spin_lock(&pl->pl_lock);
+       spin_lock(&pl->pl_lock);
         slv = pl->pl_server_lock_volume;
         clv = pl->pl_client_lock_volume;
         limit = ldlm_pool_get_limit(pl);
@@ -677,7 +677,7 @@ static int lprocfs_rd_pool_state(char *page, char **start, off_t off,
         grant_speed = grant_rate - cancel_rate;
         lvf = cfs_atomic_read(&pl->pl_lock_volume_factor);
         grant_step = ldlm_pool_t2gsp(pl->pl_recalc_period);
-        cfs_spin_unlock(&pl->pl_lock);
+       spin_unlock(&pl->pl_lock);
 
         nr += snprintf(page + nr, count - nr, "LDLM pool state (%s):\n",
                        pl->pl_name);
@@ -705,17 +705,17 @@ static int lprocfs_rd_pool_state(char *page, char **start, off_t off,
 }
 
 static int lprocfs_rd_grant_speed(char *page, char **start, off_t off,
-                                  int count, int *eof, void *data)
+                                 int count, int *eof, void *data)
 {
-        struct ldlm_pool *pl = data;
-        int               grant_speed;
-
-        cfs_spin_lock(&pl->pl_lock);
-        /* serialize with ldlm_pool_recalc */
-        grant_speed = cfs_atomic_read(&pl->pl_grant_rate) -
-                      cfs_atomic_read(&pl->pl_cancel_rate);
-        cfs_spin_unlock(&pl->pl_lock);
-        return lprocfs_rd_uint(page, start, off, count, eof, &grant_speed);
+       struct ldlm_pool *pl = data;
+       int               grant_speed;
+
+       spin_lock(&pl->pl_lock);
+       /* serialize with ldlm_pool_recalc */
+       grant_speed = cfs_atomic_read(&pl->pl_grant_rate) -
+                       cfs_atomic_read(&pl->pl_cancel_rate);
+       spin_unlock(&pl->pl_lock);
+       return lprocfs_rd_uint(page, start, off, count, eof, &grant_speed);
 }
 
 LDLM_POOL_PROC_READER(grant_plan, int);
@@ -872,10 +872,10 @@ static void ldlm_pool_proc_fini(struct ldlm_pool *pl)
 int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
                    int idx, ldlm_side_t client)
 {
-        int rc;
-        ENTRY;
+       int rc;
+       ENTRY;
 
-        cfs_spin_lock_init(&pl->pl_lock);
+       spin_lock_init(&pl->pl_lock);
         cfs_atomic_set(&pl->pl_granted, 0);
         pl->pl_recalc_time = cfs_time_current_sec();
         cfs_atomic_set(&pl->pl_lock_volume_factor, 1);
@@ -981,11 +981,11 @@ EXPORT_SYMBOL(ldlm_pool_del);
  */
 __u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
 {
-        __u64 slv;
-        cfs_spin_lock(&pl->pl_lock);
-        slv = pl->pl_server_lock_volume;
-        cfs_spin_unlock(&pl->pl_lock);
-        return slv;
+       __u64 slv;
+       spin_lock(&pl->pl_lock);
+       slv = pl->pl_server_lock_volume;
+       spin_unlock(&pl->pl_lock);
+       return slv;
 }
 EXPORT_SYMBOL(ldlm_pool_get_slv);
 
@@ -996,9 +996,9 @@ EXPORT_SYMBOL(ldlm_pool_get_slv);
  */
 void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
 {
-        cfs_spin_lock(&pl->pl_lock);
-        pl->pl_server_lock_volume = slv;
-        cfs_spin_unlock(&pl->pl_lock);
+       spin_lock(&pl->pl_lock);
+       pl->pl_server_lock_volume = slv;
+       spin_unlock(&pl->pl_lock);
 }
 EXPORT_SYMBOL(ldlm_pool_set_slv);
 
@@ -1009,11 +1009,11 @@ EXPORT_SYMBOL(ldlm_pool_set_slv);
  */
 __u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
 {
-        __u64 slv;
-        cfs_spin_lock(&pl->pl_lock);
-        slv = pl->pl_client_lock_volume;
-        cfs_spin_unlock(&pl->pl_lock);
-        return slv;
+       __u64 slv;
+       spin_lock(&pl->pl_lock);
+       slv = pl->pl_client_lock_volume;
+       spin_unlock(&pl->pl_lock);
+       return slv;
 }
 EXPORT_SYMBOL(ldlm_pool_get_clv);
 
@@ -1024,9 +1024,9 @@ EXPORT_SYMBOL(ldlm_pool_get_clv);
  */
 void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
 {
-        cfs_spin_lock(&pl->pl_lock);
-        pl->pl_client_lock_volume = clv;
-        cfs_spin_unlock(&pl->pl_lock);
+       spin_lock(&pl->pl_lock);
+       pl->pl_client_lock_volume = clv;
+       spin_unlock(&pl->pl_lock);
 }
 EXPORT_SYMBOL(ldlm_pool_set_clv);
 
@@ -1066,7 +1066,7 @@ static int ldlm_pool_granted(struct ldlm_pool *pl)
 static struct ptlrpc_thread *ldlm_pools_thread;
 static struct cfs_shrinker *ldlm_pools_srv_shrinker;
 static struct cfs_shrinker *ldlm_pools_cli_shrinker;
-static cfs_completion_t ldlm_pools_comp;
+static struct completion ldlm_pools_comp;
 
 /*
  * Cancel \a nr locks from all namespaces (if possible). Returns number of
@@ -1095,16 +1095,16 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
         for (nr_ns = cfs_atomic_read(ldlm_namespace_nr(client));
              nr_ns > 0; nr_ns--)
         {
-                cfs_mutex_lock(ldlm_namespace_lock(client));
+               mutex_lock(ldlm_namespace_lock(client));
                 if (cfs_list_empty(ldlm_namespace_list(client))) {
-                        cfs_mutex_unlock(ldlm_namespace_lock(client));
+                       mutex_unlock(ldlm_namespace_lock(client));
                         cl_env_reexit(cookie);
                         return 0;
                 }
                 ns = ldlm_namespace_first_locked(client);
                 ldlm_namespace_get(ns);
                 ldlm_namespace_move_locked(ns, client);
-                cfs_mutex_unlock(ldlm_namespace_lock(client));
+               mutex_unlock(ldlm_namespace_lock(client));
                 total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
                 ldlm_namespace_put(ns);
         }
@@ -1125,9 +1125,9 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
                 /*
                  * Do not call shrink under ldlm_namespace_lock(client)
                  */
-                cfs_mutex_lock(ldlm_namespace_lock(client));
+               mutex_lock(ldlm_namespace_lock(client));
                 if (cfs_list_empty(ldlm_namespace_list(client))) {
-                        cfs_mutex_unlock(ldlm_namespace_lock(client));
+                       mutex_unlock(ldlm_namespace_lock(client));
                         /*
                          * If list is empty, we can't return any @cached > 0,
                          * that probably would cause needless shrinker
@@ -1139,7 +1139,7 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
                 ns = ldlm_namespace_first_locked(client);
                 ldlm_namespace_get(ns);
                 ldlm_namespace_move_locked(ns, client);
-                cfs_mutex_unlock(ldlm_namespace_lock(client));
+               mutex_unlock(ldlm_namespace_lock(client));
 
                 nr_locks = ldlm_pool_granted(&ns->ns_pool);
                 cancel = 1 + nr_locks * nr / total;
@@ -1180,7 +1180,7 @@ void ldlm_pools_recalc(ldlm_side_t client)
                 /*
                  * Check all modest namespaces first.
                  */
-                cfs_mutex_lock(ldlm_namespace_lock(client));
+               mutex_lock(ldlm_namespace_lock(client));
                 cfs_list_for_each_entry(ns, ldlm_namespace_list(client),
                                         ns_list_chain)
                 {
@@ -1243,7 +1243,7 @@ void ldlm_pools_recalc(ldlm_side_t client)
                         }
                         ldlm_pool_setup(&ns->ns_pool, l);
                 }
-                cfs_mutex_unlock(ldlm_namespace_lock(client));
+               mutex_unlock(ldlm_namespace_lock(client));
         }
 
         /*
@@ -1258,28 +1258,28 @@ void ldlm_pools_recalc(ldlm_side_t client)
                  * rid of potential deadlock on client nodes when canceling
                  * locks synchronously.
                  */
-                cfs_mutex_lock(ldlm_namespace_lock(client));
-                if (cfs_list_empty(ldlm_namespace_list(client))) {
-                        cfs_mutex_unlock(ldlm_namespace_lock(client));
-                        break;
-                }
-                ns = ldlm_namespace_first_locked(client);
-
-                cfs_spin_lock(&ns->ns_lock);
-                /*
-                 * skip ns which is being freed, and we don't want to increase
-                 * its refcount again, not even temporarily. bz21519 & LU-499.
-                 */
-                if (ns->ns_stopping) {
-                        skip = 1;
-                } else {
-                        skip = 0;
-                        ldlm_namespace_get(ns);
-                }
-                cfs_spin_unlock(&ns->ns_lock);
-
-                ldlm_namespace_move_locked(ns, client);
-                cfs_mutex_unlock(ldlm_namespace_lock(client));
+               mutex_lock(ldlm_namespace_lock(client));
+               if (cfs_list_empty(ldlm_namespace_list(client))) {
+                       mutex_unlock(ldlm_namespace_lock(client));
+                       break;
+               }
+               ns = ldlm_namespace_first_locked(client);
+
+               spin_lock(&ns->ns_lock);
+               /*
+                * skip ns which is being freed, and we don't want to increase
+                * its refcount again, not even temporarily. bz21519 & LU-499.
+                */
+               if (ns->ns_stopping) {
+                       skip = 1;
+               } else {
+                       skip = 0;
+                       ldlm_namespace_get(ns);
+               }
+               spin_unlock(&ns->ns_lock);
+
+               ldlm_namespace_move_locked(ns, client);
+               mutex_unlock(ldlm_namespace_lock(client));
 
                 /*
                  * After setup is done - recalc the pool.
@@ -1337,7 +1337,7 @@ static int ldlm_pools_thread_main(void *arg)
         CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
                t_name, cfs_curproc_pid());
 
-        cfs_complete_and_exit(&ldlm_pools_comp, 0);
+       complete_and_exit(&ldlm_pools_comp, 0);
 }
 
 static int ldlm_pools_thread_start(void)
@@ -1353,7 +1353,7 @@ static int ldlm_pools_thread_start(void)
         if (ldlm_pools_thread == NULL)
                 RETURN(-ENOMEM);
 
-        cfs_init_completion(&ldlm_pools_comp);
+       init_completion(&ldlm_pools_comp);
         cfs_waitq_init(&ldlm_pools_thread->t_ctl_waitq);
 
         /*
@@ -1391,7 +1391,7 @@ static void ldlm_pools_thread_stop(void)
          * This fixes possible race and oops due to accessing freed memory
          * in pools thread.
          */
-        cfs_wait_for_completion(&ldlm_pools_comp);
+       wait_for_completion(&ldlm_pools_comp);
         OBD_FREE_PTR(ldlm_pools_thread);
         ldlm_pools_thread = NULL;
         EXIT;
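
The three hunks above replace the cfs_ completion wrappers with the native Linux start/stop handshake for a kernel thread: init_completion() before the thread is created, complete_and_exit() in the thread itself, and wait_for_completion() in the stop path before freeing the thread's state. A minimal sketch of the same shape, with hypothetical names (my_comp, my_thread_main; kthread_run stands in for the thread-creation call):

    #include <linux/completion.h>
    #include <linux/err.h>
    #include <linux/kthread.h>

    static struct completion my_comp;

    static int my_thread_main(void *arg)
    {
            /* ... work until asked to stop (elided) ... */
            complete_and_exit(&my_comp, 0); /* wake waiters, then exit */
    }

    static int my_thread_start(void)
    {
            struct task_struct *t;

            init_completion(&my_comp);      /* arm before the thread can exit */
            t = kthread_run(my_thread_main, NULL, "my_thread");
            return IS_ERR(t) ? PTR_ERR(t) : 0;
    }

    static void my_thread_stop(void)
    {
            /* signal the thread to stop (elided), then wait for its
             * complete_and_exit() before freeing anything it touches */
            wait_for_completion(&my_comp);
    }

Waiting on the exit completion, rather than merely asking the thread to stop, is what closes the race described in the comment above about accessing freed memory from the pools thread.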
index 1435d0a..f7fc23b 100644
@@ -249,11 +249,11 @@ noreproc:
                                        interrupted_completion_wait, &lwd);
         }
 
-        if (imp != NULL) {
-                cfs_spin_lock(&imp->imp_lock);
-                lwd.lwd_conn_cnt = imp->imp_conn_cnt;
-                cfs_spin_unlock(&imp->imp_lock);
-        }
+       if (imp != NULL) {
+               spin_lock(&imp->imp_lock);
+               lwd.lwd_conn_cnt = imp->imp_conn_cnt;
+               spin_unlock(&imp->imp_lock);
+       }
 
         if (ns_is_client(ldlm_lock_to_ns(lock)) &&
             OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
@@ -1244,10 +1244,10 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req)
          * alive in cleanup time. Evil races are possible which may cause
          * oops in that time.
          */
-        cfs_write_lock(&obd->obd_pool_lock);
+       write_lock(&obd->obd_pool_lock);
         obd->obd_pool_slv = new_slv;
         obd->obd_pool_limit = new_limit;
-        cfs_write_unlock(&obd->obd_pool_lock);
+       write_unlock(&obd->obd_pool_lock);
 
         RETURN(0);
 }
@@ -1549,12 +1549,12 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
 static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
                                  int count, int max, int flags)
 {
-        ldlm_cancel_lru_policy_t pf;
-        struct ldlm_lock *lock, *next;
-        int added = 0, unused, remained;
-        ENTRY;
+       ldlm_cancel_lru_policy_t pf;
+       struct ldlm_lock *lock, *next;
+       int added = 0, unused, remained;
+       ENTRY;
 
-        cfs_spin_lock(&ns->ns_lock);
+       spin_lock(&ns->ns_lock);
         unused = ns->ns_nr_unused;
         remained = unused;
 
@@ -1596,7 +1596,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
                         break;
 
                 LDLM_LOCK_GET(lock);
-                cfs_spin_unlock(&ns->ns_lock);
+               spin_unlock(&ns->ns_lock);
                 lu_ref_add(&lock->l_reference, __FUNCTION__, cfs_current());
 
                 /* Pass the lock through the policy filter and see if it
@@ -1617,14 +1617,14 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
                         lu_ref_del(&lock->l_reference,
                                    __FUNCTION__, cfs_current());
                         LDLM_LOCK_RELEASE(lock);
-                        cfs_spin_lock(&ns->ns_lock);
-                        break;
-                }
-                if (result == LDLM_POLICY_SKIP_LOCK) {
-                        lu_ref_del(&lock->l_reference,
-                                   __FUNCTION__, cfs_current());
-                        LDLM_LOCK_RELEASE(lock);
-                        cfs_spin_lock(&ns->ns_lock);
+                       spin_lock(&ns->ns_lock);
+                       break;
+               }
+               if (result == LDLM_POLICY_SKIP_LOCK) {
+                       lu_ref_del(&lock->l_reference,
+                                  __func__, cfs_current());
+                       LDLM_LOCK_RELEASE(lock);
+                       spin_lock(&ns->ns_lock);
                         continue;
                 }
 
@@ -1641,7 +1641,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
                         lu_ref_del(&lock->l_reference,
                                    __FUNCTION__, cfs_current());
                         LDLM_LOCK_RELEASE(lock);
-                        cfs_spin_lock(&ns->ns_lock);
+                       spin_lock(&ns->ns_lock);
                         continue;
                 }
                 LASSERT(!lock->l_readers && !lock->l_writers);
@@ -1671,12 +1671,12 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, cfs_list_t *cancels,
                 cfs_list_add(&lock->l_bl_ast, cancels);
                 unlock_res_and_lock(lock);
                 lu_ref_del(&lock->l_reference, __FUNCTION__, cfs_current());
-                cfs_spin_lock(&ns->ns_lock);
-                added++;
-                unused--;
-        }
-        cfs_spin_unlock(&ns->ns_lock);
-        RETURN(added);
+               spin_lock(&ns->ns_lock);
+               added++;
+               unused--;
+       }
+       spin_unlock(&ns->ns_lock);
+       RETURN(added);
 }
 
 int ldlm_cancel_lru_local(struct ldlm_namespace *ns, cfs_list_t *cancels,
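
ldlm_prepare_lru_list() above shows the recurring shape of these spinlock conversions: ns_lock is held across the LRU walk, but it must be dropped, with the current lock pinned by a reference, around the policy callback that may block, and retaken before the list is touched again. A reduced sketch under the same assumptions (item_get, item_put, and do_policy are hypothetical stand-ins):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct item { struct list_head link; };

    static DEFINE_SPINLOCK(lru_lock);
    static LIST_HEAD(lru_list);

    static void item_get(struct item *it) { /* take a reference (elided) */ }
    static void item_put(struct item *it) { /* drop a reference (elided) */ }

    static void do_policy(struct item *it)  /* may sleep */
    {
            list_del_init(&it->link);       /* unlink so the walk terminates */
    }

    static void scan_lru(void)
    {
            struct item *it;

            spin_lock(&lru_lock);
            while (!list_empty(&lru_list)) {
                    it = list_first_entry(&lru_list, struct item, link);
                    item_get(it);           /* pin before dropping the lock */
                    spin_unlock(&lru_lock);

                    do_policy(it);          /* blocking work, lock not held */

                    spin_lock(&lru_lock);   /* retake before touching the list */
                    item_put(it);
            }
            spin_unlock(&lru_lock);
    }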
index 76bbac8..772eaba 100644
@@ -55,10 +55,10 @@ cfs_mem_cache_t *ldlm_resource_slab, *ldlm_lock_slab;
 cfs_atomic_t ldlm_srv_namespace_nr = CFS_ATOMIC_INIT(0);
 cfs_atomic_t ldlm_cli_namespace_nr = CFS_ATOMIC_INIT(0);
 
-cfs_mutex_t ldlm_srv_namespace_lock;
+struct mutex ldlm_srv_namespace_lock;
 CFS_LIST_HEAD(ldlm_srv_namespace_list);
 
-cfs_mutex_t ldlm_cli_namespace_lock;
+struct mutex ldlm_cli_namespace_lock;
 CFS_LIST_HEAD(ldlm_cli_namespace_list);
 
 cfs_proc_dir_entry_t *ldlm_type_proc_dir = NULL;
@@ -649,7 +649,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
 
         CFS_INIT_LIST_HEAD(&ns->ns_list_chain);
         CFS_INIT_LIST_HEAD(&ns->ns_unused_list);
-        cfs_spin_lock_init(&ns->ns_lock);
+       spin_lock_init(&ns->ns_lock);
         cfs_atomic_set(&ns->ns_bref, 0);
         cfs_waitq_init(&ns->ns_waitq);
 
@@ -880,9 +880,9 @@ void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
                 return;
         }
 
-        cfs_spin_lock(&ns->ns_lock);
-        ns->ns_stopping = 1;
-        cfs_spin_unlock(&ns->ns_lock);
+       spin_lock(&ns->ns_lock);
+       ns->ns_stopping = 1;
+       spin_unlock(&ns->ns_lock);
 
         /*
          * Can fail with -EINTR when force == 0 in which case try harder.
@@ -976,36 +976,36 @@ EXPORT_SYMBOL(ldlm_namespace_get);
 
 void ldlm_namespace_put(struct ldlm_namespace *ns)
 {
-        if (cfs_atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
-                cfs_waitq_signal(&ns->ns_waitq);
-                cfs_spin_unlock(&ns->ns_lock);
-        }
+       if (cfs_atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
+               cfs_waitq_signal(&ns->ns_waitq);
+               spin_unlock(&ns->ns_lock);
+       }
 }
 EXPORT_SYMBOL(ldlm_namespace_put);
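
ldlm_namespace_put() above still goes through the cfs_atomic wrapper, but the primitive underneath is the kernel's atomic_dec_and_lock(): it takes the spinlock only when the counter actually drops to zero, and returns with the lock held in that case, so the wake-up runs exactly once. A hedged sketch of the put side (ns_bref and ns_lock here are local stand-ins, not the ldlm fields):

    #include <linux/atomic.h>
    #include <linux/spinlock.h>

    static atomic_t ns_bref = ATOMIC_INIT(1);
    static DEFINE_SPINLOCK(ns_lock);

    static void ns_put(void)
    {
            /* the lock is taken only on the 1 -> 0 transition */
            if (atomic_dec_and_lock(&ns_bref, &ns_lock)) {
                    /* wake anyone waiting for the last reference (elided) */
                    spin_unlock(&ns_lock);
            }
    }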
 
 /* Register @ns in the list of namespaces */
 void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
 {
-        cfs_mutex_lock(ldlm_namespace_lock(client));
-        LASSERT(cfs_list_empty(&ns->ns_list_chain));
-        cfs_list_add(&ns->ns_list_chain, ldlm_namespace_list(client));
-        cfs_atomic_inc(ldlm_namespace_nr(client));
-        cfs_mutex_unlock(ldlm_namespace_lock(client));
+       mutex_lock(ldlm_namespace_lock(client));
+       LASSERT(cfs_list_empty(&ns->ns_list_chain));
+       cfs_list_add(&ns->ns_list_chain, ldlm_namespace_list(client));
+       cfs_atomic_inc(ldlm_namespace_nr(client));
+       mutex_unlock(ldlm_namespace_lock(client));
 }
 
 /* Unregister @ns from the list of namespaces */
 void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
 {
-        cfs_mutex_lock(ldlm_namespace_lock(client));
-        LASSERT(!cfs_list_empty(&ns->ns_list_chain));
-        /*
-         * Some asserts and possibly other parts of code still using
-         * list_empty(&ns->ns_list_chain). This is why it is important
-         * to use list_del_init() here.
-         */
-        cfs_list_del_init(&ns->ns_list_chain);
-        cfs_atomic_dec(ldlm_namespace_nr(client));
-        cfs_mutex_unlock(ldlm_namespace_lock(client));
+       mutex_lock(ldlm_namespace_lock(client));
+       LASSERT(!cfs_list_empty(&ns->ns_list_chain));
+       /*
+        * Some asserts and possibly other parts of the code still use
+        * list_empty(&ns->ns_list_chain), which is why it is important
+        * to use list_del_init() here.
+        */
+       cfs_list_del_init(&ns->ns_list_chain);
+       cfs_atomic_dec(ldlm_namespace_nr(client));
+       mutex_unlock(ldlm_namespace_lock(client));
 }
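
Register/unregister above is the plain mutex-serialized registry pattern, and the comment explains its one subtlety: list_del_init() rather than list_del(), so list_empty(&ns->ns_list_chain) keeps answering "is this entry registered?" after removal. A standalone version with a hypothetical struct entry:

    #include <linux/list.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(reg_lock);
    static LIST_HEAD(reg_list);

    struct entry { struct list_head chain; };

    static void entry_register(struct entry *e)
    {
            mutex_lock(&reg_lock);
            list_add(&e->chain, &reg_list);
            mutex_unlock(&reg_lock);
    }

    static void entry_unregister(struct entry *e)
    {
            mutex_lock(&reg_lock);
            list_del_init(&e->chain);       /* keeps list_empty() usable as
                                             * an "is registered?" test */
            mutex_unlock(&reg_lock);
    }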
 
 /* Should be called under ldlm_namespace_lock(client) taken */
@@ -1046,15 +1046,15 @@ static struct ldlm_resource *ldlm_resource_new(void)
         }
 
         cfs_atomic_set(&res->lr_refcount, 1);
-        cfs_spin_lock_init(&res->lr_lock);
-        lu_ref_init(&res->lr_reference);
+       spin_lock_init(&res->lr_lock);
+       lu_ref_init(&res->lr_reference);
 
-        /* one who creates the resource must unlock
-         * the mutex after lvb initialization */
-        cfs_mutex_init(&res->lr_lvb_mutex);
-        cfs_mutex_lock(&res->lr_lvb_mutex);
+       /* whoever creates the resource must unlock
+        * the mutex after LVB initialization */
+       mutex_init(&res->lr_lvb_mutex);
+       mutex_lock(&res->lr_lvb_mutex);
 
-        return res;
+       return res;
 }
 
 /* Args: unlocked namespace
@@ -1081,8 +1081,8 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
                 res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
                 /* synchronize WRT resource creation */
                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
-                        cfs_mutex_lock(&res->lr_lvb_mutex);
-                        cfs_mutex_unlock(&res->lr_lvb_mutex);
+                       mutex_lock(&res->lr_lvb_mutex);
+                       mutex_unlock(&res->lr_lvb_mutex);
                 }
                 return res;
         }
@@ -1114,14 +1114,14 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
                 /* clean lu_ref for failed resource */
                lu_ref_fini(&res->lr_reference);
                /* We have taken lr_lvb_mutex. Drop it. */
-               cfs_mutex_unlock(&res->lr_lvb_mutex);
+               mutex_unlock(&res->lr_lvb_mutex);
                OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
 
                 res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
                 /* synchronize WRT resource creation */
                 if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
-                        cfs_mutex_lock(&res->lr_lvb_mutex);
-                        cfs_mutex_unlock(&res->lr_lvb_mutex);
+                       mutex_lock(&res->lr_lvb_mutex);
+                       mutex_unlock(&res->lr_lvb_mutex);
                 }
                 return res;
         }
@@ -1142,7 +1142,7 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
        }
 
        /* we create resource with locked lr_lvb_mutex */
-       cfs_mutex_unlock(&res->lr_lvb_mutex);
+       mutex_unlock(&res->lr_lvb_mutex);
 
        return res;
 }
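
The lr_lvb_mutex idiom in ldlm_resource_new()/ldlm_resource_get() above deserves a note: the creator publishes the resource with the mutex already held and releases it only once the LVB is initialized, so racing lookups synchronize with a bare lock/unlock pair. A minimal sketch of that init-barrier use of a mutex (names hypothetical):

    #include <linux/mutex.h>

    struct resource { struct mutex init_mutex; /* ... payload ... */ };

    static void resource_create(struct resource *res)
    {
            mutex_init(&res->init_mutex);
            mutex_lock(&res->init_mutex);   /* published in the locked state */
            /* ... insert into hash table, initialize payload ... */
            mutex_unlock(&res->init_mutex); /* init done; waiters may proceed */
    }

    static void resource_wait_initialized(struct resource *res)
    {
            /* blocks until the creator has dropped the mutex */
            mutex_lock(&res->init_mutex);
            mutex_unlock(&res->init_mutex);
    }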
@@ -1303,7 +1303,7 @@ void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
         if (!((libcfs_debug | D_ERROR) & level))
                 return;
 
-        cfs_mutex_lock(ldlm_namespace_lock(client));
+       mutex_lock(ldlm_namespace_lock(client));
 
         cfs_list_for_each(tmp, ldlm_namespace_list(client)) {
                 struct ldlm_namespace *ns;
@@ -1311,7 +1311,7 @@ void ldlm_dump_all_namespaces(ldlm_side_t client, int level)
                 ldlm_namespace_dump(level, ns);
         }
 
-        cfs_mutex_unlock(ldlm_namespace_lock(client));
+       mutex_unlock(ldlm_namespace_lock(client));
 }
 EXPORT_SYMBOL(ldlm_dump_all_namespaces);
 
@@ -1343,9 +1343,9 @@ void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
         cfs_hash_for_each_nolock(ns->ns_rs_hash,
                                  ldlm_res_hash_dump,
                                  (void *)(unsigned long)level);
-        cfs_spin_lock(&ns->ns_lock);
-        ns->ns_next_dump = cfs_time_shift(10);
-        cfs_spin_unlock(&ns->ns_lock);
+       spin_lock(&ns->ns_lock);
+       ns->ns_next_dump = cfs_time_shift(10);
+       spin_unlock(&ns->ns_lock);
 }
 EXPORT_SYMBOL(ldlm_namespace_dump);
 
index f94d0de..81c58a7 100644
@@ -474,7 +474,7 @@ int ll_revalidate_it(struct dentry *de, int lookup_flags,
                 ibits = MDS_INODELOCK_LOOKUP;
                 if (!ll_have_md_lock(inode, &ibits, LCK_MINMODE))
                         goto do_lock;
-                cfs_mutex_lock(&lli->lli_och_mutex);
+               mutex_lock(&lli->lli_och_mutex);
                 if (*och_p) { /* Everything is open already, do nothing */
                         /*(*och_usecount)++;  Do not let them steal our open
                           handle from under us */
@@ -486,10 +486,10 @@ int ll_revalidate_it(struct dentry *de, int lookup_flags,
                            hope the lock won't be invalidated in between. But
                            if it would be, we'll reopen the open request to
                            MDS later during file open path */
-                        cfs_mutex_unlock(&lli->lli_och_mutex);
+                       mutex_unlock(&lli->lli_och_mutex);
                         RETURN(1);
                 } else {
-                        cfs_mutex_unlock(&lli->lli_och_mutex);
+                       mutex_unlock(&lli->lli_och_mutex);
                 }
         }
 
index 85b4a48..5ae75b7 100644
@@ -390,7 +390,7 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
         }
         ldlm_lock_dump_handle(D_OTHER, &lockh);
 
-        cfs_mutex_lock(&lli->lli_readdir_mutex);
+       mutex_lock(&lli->lli_readdir_mutex);
         page = ll_dir_page_locate(dir, &lhash, &start, &end);
         if (IS_ERR(page)) {
                 CERROR("dir page locate: "DFID" at "LPU64": rc %ld\n",
@@ -463,7 +463,7 @@ hash_collision:
                 goto fail;
         }
 out_unlock:
-        cfs_mutex_unlock(&lli->lli_readdir_mutex);
+       mutex_unlock(&lli->lli_readdir_mutex);
         ldlm_lock_decref(&lockh, mode);
         return page;
 
@@ -1470,7 +1470,7 @@ static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
         loff_t ret = -EINVAL;
         ENTRY;
 
-        cfs_mutex_lock(&inode->i_mutex);
+       mutex_lock(&inode->i_mutex);
         switch (origin) {
                 case SEEK_SET:
                         break;
@@ -1508,7 +1508,7 @@ static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
         GOTO(out, ret);
 
 out:
-        cfs_mutex_unlock(&inode->i_mutex);
+       mutex_unlock(&inode->i_mutex);
         return ret;
 }
 
index 4249f6d..072a53c 100644
@@ -201,15 +201,15 @@ int ll_md_real_close(struct inode *inode, int flags)
                 och_usecount = &lli->lli_open_fd_read_count;
         }
 
-        cfs_mutex_lock(&lli->lli_och_mutex);
+       mutex_lock(&lli->lli_och_mutex);
         if (*och_usecount) { /* There are still users of this handle, so
                                 skip freeing it. */
-                cfs_mutex_unlock(&lli->lli_och_mutex);
+               mutex_unlock(&lli->lli_och_mutex);
                 RETURN(0);
         }
         och=*och_p;
         *och_p = NULL;
-        cfs_mutex_unlock(&lli->lli_och_mutex);
+       mutex_unlock(&lli->lli_och_mutex);
 
         if (och) { /* There might be a race and somebody may have freed this
                       och already */
@@ -241,7 +241,7 @@ int ll_md_close(struct obd_export *md_exp, struct inode *inode,
                 struct inode *inode = file->f_dentry->d_inode;
                 ldlm_policy_data_t policy = {.l_inodebits={MDS_INODELOCK_OPEN}};
 
-                cfs_mutex_lock(&lli->lli_och_mutex);
+               mutex_lock(&lli->lli_och_mutex);
                 if (fd->fd_omode & FMODE_WRITE) {
                         lockmode = LCK_CW;
                         LASSERT(lli->lli_open_fd_write_count);
@@ -255,7 +255,7 @@ int ll_md_close(struct obd_export *md_exp, struct inode *inode,
                         LASSERT(lli->lli_open_fd_read_count);
                         lli->lli_open_fd_read_count--;
                 }
-                cfs_mutex_unlock(&lli->lli_och_mutex);
+               mutex_unlock(&lli->lli_och_mutex);
 
                 if (!md_lock_match(md_exp, flags, ll_inode2fid(inode),
                                    LDLM_IBITS, &policy, lockmode,
@@ -513,14 +513,14 @@ int ll_file_open(struct inode *inode, struct file *file)
 
         fd->fd_file = file;
         if (S_ISDIR(inode->i_mode)) {
-                cfs_spin_lock(&lli->lli_sa_lock);
-                if (lli->lli_opendir_key == NULL && lli->lli_sai == NULL &&
-                    lli->lli_opendir_pid == 0) {
-                        lli->lli_opendir_key = fd;
-                        lli->lli_opendir_pid = cfs_curproc_pid();
-                        opendir_set = 1;
-                }
-                cfs_spin_unlock(&lli->lli_sa_lock);
+               spin_lock(&lli->lli_sa_lock);
+               if (lli->lli_opendir_key == NULL && lli->lli_sai == NULL &&
+                   lli->lli_opendir_pid == 0) {
+                       lli->lli_opendir_key = fd;
+                       lli->lli_opendir_pid = cfs_curproc_pid();
+                       opendir_set = 1;
+               }
+               spin_unlock(&lli->lli_sa_lock);
         }
 
         if (inode->i_sb->s_root == file->f_dentry) {
@@ -570,14 +570,14 @@ restart:
                 och_usecount = &lli->lli_open_fd_read_count;
         }
 
-        cfs_mutex_lock(&lli->lli_och_mutex);
+       mutex_lock(&lli->lli_och_mutex);
         if (*och_p) { /* Open handle is present */
                 if (it_disposition(it, DISP_OPEN_OPEN)) {
                         /* Well, there's extra open request that we do not need,
                            let's close it somehow. This will decref request. */
                         rc = it_open_error(DISP_OPEN_OPEN, it);
                         if (rc) {
-                                cfs_mutex_unlock(&lli->lli_och_mutex);
+                               mutex_unlock(&lli->lli_och_mutex);
                                 GOTO(out_openerr, rc);
                         }
 
@@ -588,7 +588,7 @@ restart:
                 rc = ll_local_open(file, it, fd, NULL);
                 if (rc) {
                         (*och_usecount)--;
-                        cfs_mutex_unlock(&lli->lli_och_mutex);
+                       mutex_unlock(&lli->lli_och_mutex);
                         GOTO(out_openerr, rc);
                 }
         } else {
@@ -599,7 +599,7 @@ restart:
                            could be cancelled, and since blocking ast handler
                            would attempt to grab och_mutex as well, that would
                            result in a deadlock */
-                        cfs_mutex_unlock(&lli->lli_och_mutex);
+                       mutex_unlock(&lli->lli_och_mutex);
                         it->it_create_mode |= M_CHECK_STALE;
                         rc = ll_intent_file_open(file, NULL, 0, it);
                         it->it_create_mode &= ~M_CHECK_STALE;
@@ -629,7 +629,7 @@ restart:
                 if (rc)
                         GOTO(out_och_free, rc);
         }
-        cfs_mutex_unlock(&lli->lli_och_mutex);
+       mutex_unlock(&lli->lli_och_mutex);
         fd = NULL;
 
         /* Must do this outside lli_och_mutex lock to prevent deadlock where
@@ -657,7 +657,7 @@ out_och_free:
                         *och_p = NULL; /* OBD_FREE writes some magic there */
                         (*och_usecount)--;
                 }
-                cfs_mutex_unlock(&lli->lli_och_mutex);
+               mutex_unlock(&lli->lli_och_mutex);
 
 out_openerr:
                 if (opendir_set != 0)
@@ -857,12 +857,12 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
 #endif
                         if ((iot == CIT_WRITE) &&
                             !(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
-                                if (cfs_mutex_lock_interruptible(&lli->
+                               if (mutex_lock_interruptible(&lli->
                                                                lli_write_mutex))
                                         GOTO(out, result = -ERESTARTSYS);
                                 write_mutex_locked = 1;
                         } else if (iot == CIT_READ) {
-                                cfs_down_read(&lli->lli_trunc_sem);
+                               down_read(&lli->lli_trunc_sem);
                         }
                         break;
                 case IO_SENDFILE:
@@ -879,9 +879,9 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
                 }
                 result = cl_io_loop(env, io);
                 if (write_mutex_locked)
-                        cfs_mutex_unlock(&lli->lli_write_mutex);
+                       mutex_unlock(&lli->lli_write_mutex);
                 else if (args->via_io_subtype == IO_NORMAL && iot == CIT_READ)
-                        cfs_up_read(&lli->lli_trunc_sem);
+                       up_read(&lli->lli_trunc_sem);
         } else {
                 /* cl_io_rw_init() handled IO */
                 result = io->ci_result;
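
mutex_lock_interruptible() above is the one call in this hunk whose return value matters: unlike mutex_lock(), it gives up and returns -EINTR if a signal arrives while sleeping, and the caller here maps any failure to -ERESTARTSYS. In isolation:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(write_mutex);

    static int write_path(void)
    {
            if (mutex_lock_interruptible(&write_mutex))
                    return -ERESTARTSYS;    /* interrupted by a signal */
            /* ... critical section ... */
            mutex_unlock(&write_mutex);
            return 0;
    }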
@@ -1506,47 +1506,47 @@ int ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
         if (ll_file_nolock(file))
                 RETURN(-EOPNOTSUPP);
 
-        cfs_spin_lock(&lli->lli_lock);
-        if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
-                CWARN("group lock already existed with gid %lu\n",
-                       fd->fd_grouplock.cg_gid);
-                cfs_spin_unlock(&lli->lli_lock);
-                RETURN(-EINVAL);
-        }
-        LASSERT(fd->fd_grouplock.cg_lock == NULL);
-        cfs_spin_unlock(&lli->lli_lock);
+       spin_lock(&lli->lli_lock);
+       if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
+               CWARN("group lock already existed with gid %lu\n",
+                     fd->fd_grouplock.cg_gid);
+               spin_unlock(&lli->lli_lock);
+               RETURN(-EINVAL);
+       }
+       LASSERT(fd->fd_grouplock.cg_lock == NULL);
+       spin_unlock(&lli->lli_lock);
 
-        rc = cl_get_grouplock(cl_i2info(inode)->lli_clob,
-                              arg, (file->f_flags & O_NONBLOCK), &grouplock);
-        if (rc)
-                RETURN(rc);
+       rc = cl_get_grouplock(cl_i2info(inode)->lli_clob,
+                             arg, (file->f_flags & O_NONBLOCK), &grouplock);
+       if (rc)
+               RETURN(rc);
 
-        cfs_spin_lock(&lli->lli_lock);
-        if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
-                cfs_spin_unlock(&lli->lli_lock);
-                CERROR("another thread just won the race\n");
-                cl_put_grouplock(&grouplock);
-                RETURN(-EINVAL);
-        }
+       spin_lock(&lli->lli_lock);
+       if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
+               spin_unlock(&lli->lli_lock);
+               CERROR("another thread just won the race\n");
+               cl_put_grouplock(&grouplock);
+               RETURN(-EINVAL);
+       }
 
-        fd->fd_flags |= LL_FILE_GROUP_LOCKED;
-        fd->fd_grouplock = grouplock;
-        cfs_spin_unlock(&lli->lli_lock);
+       fd->fd_flags |= LL_FILE_GROUP_LOCKED;
+       fd->fd_grouplock = grouplock;
+       spin_unlock(&lli->lli_lock);
 
-        CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
-        RETURN(0);
+       CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
+       RETURN(0);
 }
 
 int ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg)
 {
-        struct ll_inode_info   *lli = ll_i2info(inode);
-        struct ll_file_data    *fd = LUSTRE_FPRIVATE(file);
-        struct ccc_grouplock    grouplock;
-        ENTRY;
+       struct ll_inode_info   *lli = ll_i2info(inode);
+       struct ll_file_data    *fd = LUSTRE_FPRIVATE(file);
+       struct ccc_grouplock    grouplock;
+       ENTRY;
 
-        cfs_spin_lock(&lli->lli_lock);
-        if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
-                cfs_spin_unlock(&lli->lli_lock);
+       spin_lock(&lli->lli_lock);
+       if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+               spin_unlock(&lli->lli_lock);
                 CWARN("no group lock held\n");
                 RETURN(-EINVAL);
         }
@@ -1555,18 +1555,18 @@ int ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg)
         if (fd->fd_grouplock.cg_gid != arg) {
                 CWARN("group lock %lu doesn't match current id %lu\n",
                        arg, fd->fd_grouplock.cg_gid);
-                cfs_spin_unlock(&lli->lli_lock);
-                RETURN(-EINVAL);
-        }
+               spin_unlock(&lli->lli_lock);
+               RETURN(-EINVAL);
+       }
 
-        grouplock = fd->fd_grouplock;
-        memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock));
-        fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
-        cfs_spin_unlock(&lli->lli_lock);
+       grouplock = fd->fd_grouplock;
+       memset(&fd->fd_grouplock, 0, sizeof(fd->fd_grouplock));
+       fd->fd_flags &= ~LL_FILE_GROUP_LOCKED;
+       spin_unlock(&lli->lli_lock);
 
-        cl_put_grouplock(&grouplock);
-        CDEBUG(D_INFO, "group lock %lu released\n", arg);
-        RETURN(0);
+       cl_put_grouplock(&grouplock);
+       CDEBUG(D_INFO, "group lock %lu released\n", arg);
+       RETURN(0);
 }
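
ll_get_grouplock() above also shows the price of dropping lli_lock around the blocking cl_get_grouplock() call: the LL_FILE_GROUP_LOCKED flag must be re-checked under the lock afterwards, because another thread may have won the race in the window. The skeleton, with hypothetical acquire/release helpers:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(state_lock);
    static int state_taken;

    static int acquire_blocking(void) { return 0; } /* may sleep (elided) */
    static void release_blocking(void) { }

    static int take_once(void)
    {
            int rc;

            spin_lock(&state_lock);
            if (state_taken) {              /* fast-path check */
                    spin_unlock(&state_lock);
                    return -EINVAL;
            }
            spin_unlock(&state_lock);

            rc = acquire_blocking();        /* sleeps; spinlock must be dropped */
            if (rc)
                    return rc;

            spin_lock(&state_lock);
            if (state_taken) {              /* re-check: someone beat us here */
                    spin_unlock(&state_lock);
                    release_blocking();
                    return -EINVAL;
            }
            state_taken = 1;
            spin_unlock(&state_lock);
            return 0;
    }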
 
 /**
@@ -2546,10 +2546,10 @@ struct posix_acl * ll_get_acl(struct inode *inode, int type)
        struct posix_acl *acl = NULL;
        ENTRY;
 
-       cfs_spin_lock(&lli->lli_lock);
+       spin_lock(&lli->lli_lock);
        /* VFS' acl_permission_check->check_acl will release the refcount */
        acl = posix_acl_dup(lli->lli_posix_acl);
-       cfs_spin_unlock(&lli->lli_lock);
+       spin_unlock(&lli->lli_lock);
 
        RETURN(acl);
 }
@@ -2727,7 +2727,7 @@ struct inode_operations ll_file_inode_operations = {
 
 /* dynamic ioctl number support routines */
 static struct llioc_ctl_data {
-        cfs_rw_semaphore_t      ioc_sem;
+       struct rw_semaphore     ioc_sem;
         cfs_list_t              ioc_head;
 } llioc = {
         __RWSEM_INITIALIZER(llioc.ioc_sem),
@@ -2764,9 +2764,9 @@ void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd)
         in_data->iocd_count = count;
         memcpy(in_data->iocd_cmd, cmd, sizeof(unsigned int) * count);
 
-        cfs_down_write(&llioc.ioc_sem);
+       down_write(&llioc.ioc_sem);
         cfs_list_add_tail(&in_data->iocd_list, &llioc.ioc_head);
-        cfs_up_write(&llioc.ioc_sem);
+       up_write(&llioc.ioc_sem);
 
         RETURN(in_data);
 }
@@ -2778,19 +2778,19 @@ void ll_iocontrol_unregister(void *magic)
         if (magic == NULL)
                 return;
 
-        cfs_down_write(&llioc.ioc_sem);
+       down_write(&llioc.ioc_sem);
         cfs_list_for_each_entry(tmp, &llioc.ioc_head, iocd_list) {
                 if (tmp == magic) {
                         unsigned int size = tmp->iocd_size;
 
                         cfs_list_del(&tmp->iocd_list);
-                        cfs_up_write(&llioc.ioc_sem);
+                       up_write(&llioc.ioc_sem);
 
                         OBD_FREE(tmp, size);
                         return;
                 }
         }
-        cfs_up_write(&llioc.ioc_sem);
+       up_write(&llioc.ioc_sem);
 
         CWARN("didn't find iocontrol register block with magic: %p\n", magic);
 }
@@ -2805,7 +2805,7 @@ enum llioc_iter ll_iocontrol_call(struct inode *inode, struct file *file,
         struct llioc_data *data;
         int rc = -EINVAL, i;
 
-        cfs_down_read(&llioc.ioc_sem);
+       down_read(&llioc.ioc_sem);
         cfs_list_for_each_entry(data, &llioc.ioc_head, iocd_list) {
                 for (i = 0; i < data->iocd_count; i++) {
                         if (cmd != data->iocd_cmd[i])
@@ -2818,7 +2818,7 @@ enum llioc_iter ll_iocontrol_call(struct inode *inode, struct file *file,
                 if (ret == LLIOC_STOP)
                         break;
         }
-        cfs_up_read(&llioc.ioc_sem);
+       up_read(&llioc.ioc_sem);
 
         if (rcp)
                 *rcp = rc;
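
The llioc table converted above is a textbook rw_semaphore split: registration and unregistration modify the list under down_write(), while the hot ll_iocontrol_call() dispatch path only reads it under down_read(), so concurrent ioctl lookups never serialize against each other. Reduced to its shape:

    #include <linux/list.h>
    #include <linux/rwsem.h>

    static DECLARE_RWSEM(ioc_sem);
    static LIST_HEAD(ioc_head);

    struct ioc_block { struct list_head list; };

    static void ioc_register(struct ioc_block *b)
    {
            down_write(&ioc_sem);           /* exclusive: the list changes */
            list_add_tail(&b->list, &ioc_head);
            up_write(&ioc_sem);
    }

    static void ioc_dispatch(void)
    {
            struct ioc_block *b;

            down_read(&ioc_sem);            /* shared: many readers at once */
            list_for_each_entry(b, &ioc_head, list) {
                    /* match the command and call its handler (elided) */
            }
            up_read(&ioc_sem);
    }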
@@ -2901,7 +2901,7 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen)
                RETURN(PTR_ERR(op_data));
 
        /* take layout lock mutex to enqueue layout lock exclusively. */
-       cfs_mutex_lock(&lli->lli_layout_mutex);
+       mutex_lock(&lli->lli_layout_mutex);
 
        /* try again inside layout mutex */
        mode = ll_take_md_lock(inode, MDS_INODELOCK_LAYOUT, &lockh,
@@ -2910,7 +2910,7 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen)
                *gen = lli->lli_layout_gen + 1;
 
                ldlm_lock_decref(&lockh, mode);
-               cfs_mutex_unlock(&lli->lli_layout_mutex);
+               mutex_unlock(&lli->lli_layout_mutex);
                ll_finish_md_op_data(op_data);
                RETURN(0);
        }
@@ -2968,7 +2968,7 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen)
        }
        ll_intent_drop_lock(&it);
 
-       cfs_mutex_unlock(&lli->lli_layout_mutex);
+       mutex_unlock(&lli->lli_layout_mutex);
        ll_finish_md_op_data(op_data);
 
        RETURN(rc);
index 8ac120d..6be6b17 100644
@@ -92,13 +92,13 @@ static inline int capa_is_to_expire(struct obd_capa *ocapa)
 
 static inline int have_expired_capa(void)
 {
-        struct obd_capa *ocapa = NULL;
-        int expired = 0;
+       struct obd_capa *ocapa = NULL;
+       int expired = 0;
 
-        /* if ll_capa_list has client capa to expire or ll_idle_capas has
-         * expired capa, return 1.
-         */
-        cfs_spin_lock(&capa_lock);
+       /* If ll_capa_list has a client capa about to expire, or ll_idle_capas
+        * holds an already-expired capa, return 1.
+        */
+       spin_lock(&capa_lock);
         if (!cfs_list_empty(ll_capa_list)) {
                 ocapa = cfs_list_entry(ll_capa_list->next, struct obd_capa,
                                        c_list);
@@ -112,11 +112,11 @@ static inline int have_expired_capa(void)
                 if (!expired)
                         update_capa_timer(ocapa, ocapa->c_expiry);
         }
-        cfs_spin_unlock(&capa_lock);
+       spin_unlock(&capa_lock);
 
-        if (expired)
-                DEBUG_CAPA(D_SEC, &ocapa->c_capa, "expired");
-        return expired;
+       if (expired)
+               DEBUG_CAPA(D_SEC, &ocapa->c_capa, "expired");
+       return expired;
 }
 
 static void sort_add_capa(struct obd_capa *ocapa, cfs_list_t *head)
@@ -189,7 +189,7 @@ static int capa_thread_main(void *unused)
 
                 next = NULL;
 
-                cfs_spin_lock(&capa_lock);
+               spin_lock(&capa_lock);
                 cfs_list_for_each_entry_safe(ocapa, tmp, ll_capa_list, c_list) {
                         __u64 ibits;
 
@@ -241,10 +241,10 @@ static int capa_thread_main(void *unused)
 
                         capa_get(ocapa);
                         ll_capa_renewed++;
-                        cfs_spin_unlock(&capa_lock);
-                        rc = md_renew_capa(ll_i2mdexp(inode), ocapa,
-                                           ll_update_capa);
-                        cfs_spin_lock(&capa_lock);
+                       spin_unlock(&capa_lock);
+                       rc = md_renew_capa(ll_i2mdexp(inode), ocapa,
+                                          ll_update_capa);
+                       spin_lock(&capa_lock);
                         if (rc) {
                                 DEBUG_CAPA(D_ERROR, &ocapa->c_capa,
                                            "renew failed: %d", rc);
@@ -278,12 +278,12 @@ static int capa_thread_main(void *unused)
                         ll_delete_capa(ocapa);
                 }
 
-                cfs_spin_unlock(&capa_lock);
-        }
+               spin_unlock(&capa_lock);
+       }
 
-        thread_set_flags(&ll_capa_thread, SVC_STOPPED);
-        cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
-        RETURN(0);
+       thread_set_flags(&ll_capa_thread, SVC_STOPPED);
+       cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
+       RETURN(0);
 }
 
 void ll_capa_timer_callback(unsigned long unused)
@@ -331,7 +331,7 @@ struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc)
         LASSERT(opc == CAPA_OPC_OSS_WRITE || opc == CAPA_OPC_OSS_RW ||
                 opc == CAPA_OPC_OSS_TRUNC);
 
-        cfs_spin_lock(&capa_lock);
+       spin_lock(&capa_lock);
         cfs_list_for_each_entry(ocapa, &lli->lli_oss_capas, u.cli.lli_list) {
                 if (capa_is_expired(ocapa))
                         continue;
@@ -368,9 +368,9 @@ struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc)
                         cfs_atomic_set(&ll_capa_debug, 0);
                 }
         }
-        cfs_spin_unlock(&capa_lock);
+       spin_unlock(&capa_lock);
 
-        RETURN(ocapa);
+       RETURN(ocapa);
 }
 EXPORT_SYMBOL(ll_osscapa_get);
 
@@ -385,9 +385,9 @@ struct obd_capa *ll_mdscapa_get(struct inode *inode)
         if ((ll_i2sbi(inode)->ll_flags & LL_SBI_MDS_CAPA) == 0)
                 RETURN(NULL);
 
-        cfs_spin_lock(&capa_lock);
-        ocapa = capa_get(lli->lli_mds_capa);
-        cfs_spin_unlock(&capa_lock);
+       spin_lock(&capa_lock);
+       ocapa = capa_get(lli->lli_mds_capa);
+       spin_unlock(&capa_lock);
         if (!ocapa && cfs_atomic_read(&ll_capa_debug)) {
                 CERROR("no mds capability for "DFID"\n", PFID(&lli->lli_fid));
                 cfs_atomic_set(&ll_capa_debug, 0);
@@ -410,9 +410,9 @@ static struct obd_capa *do_add_mds_capa(struct inode *inode,
 
                 DEBUG_CAPA(D_SEC, capa, "add MDS");
         } else {
-                cfs_spin_lock(&old->c_lock);
-                old->c_capa = *capa;
-                cfs_spin_unlock(&old->c_lock);
+               spin_lock(&old->c_lock);
+               old->c_capa = *capa;
+               spin_unlock(&old->c_lock);
 
                 DEBUG_CAPA(D_SEC, capa, "update MDS");
 
@@ -481,9 +481,9 @@ static struct obd_capa *do_add_oss_capa(struct inode *inode,
 
                 DEBUG_CAPA(D_SEC, capa, "add OSS");
         } else {
-                cfs_spin_lock(&old->c_lock);
-                old->c_capa = *capa;
-                cfs_spin_unlock(&old->c_lock);
+               spin_lock(&old->c_lock);
+               old->c_capa = *capa;
+               spin_unlock(&old->c_lock);
 
                 DEBUG_CAPA(D_SEC, capa, "update OSS");
 
@@ -497,7 +497,7 @@ static struct obd_capa *do_add_oss_capa(struct inode *inode,
 
 struct obd_capa *ll_add_capa(struct inode *inode, struct obd_capa *ocapa)
 {
-        cfs_spin_lock(&capa_lock);
+       spin_lock(&capa_lock);
         ocapa = capa_for_mds(&ocapa->c_capa) ? do_add_mds_capa(inode, ocapa) :
                                                do_add_oss_capa(inode, ocapa);
 
@@ -510,10 +510,10 @@ struct obd_capa *ll_add_capa(struct inode *inode, struct obd_capa *ocapa)
                 update_capa_timer(ocapa, capa_renewal_time(ocapa));
         }
 
-        cfs_spin_unlock(&capa_lock);
+       spin_unlock(&capa_lock);
 
-        cfs_atomic_set(&ll_capa_debug, 1);
-        return ocapa;
+       cfs_atomic_set(&ll_capa_debug, 1);
+       return ocapa;
 }
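
The c_lock conversions in do_add_mds_capa()/do_add_oss_capa() above are the other common spinlock shape: a per-object lock held just long enough for a structure assignment, so readers who take the same lock never observe a half-copied capability. Sketch (hypothetical types):

    #include <linux/spinlock.h>

    struct capa { char body[64]; };

    struct obj {
            spinlock_t  lock;
            struct capa capa;
    };

    static void obj_update(struct obj *o, const struct capa *newc)
    {
            spin_lock(&o->lock);
            o->capa = *newc;        /* readers holding o->lock see old or new,
                                     * never a torn mixture */
            spin_unlock(&o->lock);
    }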
 
 static inline void delay_capa_renew(struct obd_capa *oc, cfs_time_t delay)
@@ -533,7 +533,7 @@ int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa)
         if (IS_ERR(capa)) {
                 /* set error code */
                 rc = PTR_ERR(capa);
-                cfs_spin_lock(&capa_lock);
+               spin_lock(&capa_lock);
                 if (rc == -ENOENT) {
                         DEBUG_CAPA(D_SEC, &ocapa->c_capa,
                                    "renewal canceled because object removed");
@@ -558,34 +558,34 @@ int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa)
 
                 cfs_list_del_init(&ocapa->c_list);
                 sort_add_capa(ocapa, &ll_idle_capas);
-                cfs_spin_unlock(&capa_lock);
-
-                capa_put(ocapa);
-                iput(inode);
-                RETURN(rc);
-        }
-
-        cfs_spin_lock(&ocapa->c_lock);
-        LASSERT(!memcmp(&ocapa->c_capa, capa,
-                        offsetof(struct lustre_capa, lc_opc)));
-        ocapa->c_capa = *capa;
-        set_capa_expiry(ocapa);
-        cfs_spin_unlock(&ocapa->c_lock);
-
-        cfs_spin_lock(&capa_lock);
-        if (capa_for_oss(capa))
-                inode_add_oss_capa(inode, ocapa);
-        DEBUG_CAPA(D_SEC, capa, "renew");
-        EXIT;
+               spin_unlock(&capa_lock);
+
+               capa_put(ocapa);
+               iput(inode);
+               RETURN(rc);
+       }
+
+       spin_lock(&ocapa->c_lock);
+       LASSERT(!memcmp(&ocapa->c_capa, capa,
+                       offsetof(struct lustre_capa, lc_opc)));
+       ocapa->c_capa = *capa;
+       set_capa_expiry(ocapa);
+       spin_unlock(&ocapa->c_lock);
+
+       spin_lock(&capa_lock);
+       if (capa_for_oss(capa))
+               inode_add_oss_capa(inode, ocapa);
+       DEBUG_CAPA(D_SEC, capa, "renew");
+       EXIT;
 retry:
-        cfs_list_del_init(&ocapa->c_list);
-        sort_add_capa(ocapa, ll_capa_list);
-        update_capa_timer(ocapa, capa_renewal_time(ocapa));
-        cfs_spin_unlock(&capa_lock);
-
-        capa_put(ocapa);
-        iput(inode);
-        return rc;
+       cfs_list_del_init(&ocapa->c_list);
+       sort_add_capa(ocapa, ll_capa_list);
+       update_capa_timer(ocapa, capa_renewal_time(ocapa));
+       spin_unlock(&capa_lock);
+
+       capa_put(ocapa);
+       iput(inode);
+       return rc;
 }
 
 void ll_capa_open(struct inode *inode)
@@ -628,26 +628,26 @@ void ll_truncate_free_capa(struct obd_capa *ocapa)
         /* release ref when find */
         capa_put(ocapa);
         if (likely(ocapa->c_capa.lc_opc == CAPA_OPC_OSS_TRUNC)) {
-                cfs_spin_lock(&capa_lock);
-                ll_delete_capa(ocapa);
-                cfs_spin_unlock(&capa_lock);
-        }
+               spin_lock(&capa_lock);
+               ll_delete_capa(ocapa);
+               spin_unlock(&capa_lock);
+       }
 }
 
 void ll_clear_inode_capas(struct inode *inode)
 {
-        struct ll_inode_info *lli = ll_i2info(inode);
-        struct obd_capa *ocapa, *tmp;
-
-        cfs_spin_lock(&capa_lock);
-        ocapa = lli->lli_mds_capa;
-        if (ocapa)
-                ll_delete_capa(ocapa);
-
-        cfs_list_for_each_entry_safe(ocapa, tmp, &lli->lli_oss_capas,
-                                     u.cli.lli_list)
-                ll_delete_capa(ocapa);
-        cfs_spin_unlock(&capa_lock);
+       struct ll_inode_info *lli = ll_i2info(inode);
+       struct obd_capa *ocapa, *tmp;
+
+       spin_lock(&capa_lock);
+       ocapa = lli->lli_mds_capa;
+       if (ocapa)
+               ll_delete_capa(ocapa);
+
+       cfs_list_for_each_entry_safe(ocapa, tmp, &lli->lli_oss_capas,
+                                    u.cli.lli_list)
+               ll_delete_capa(ocapa);
+       spin_unlock(&capa_lock);
 }
 
 void ll_print_capa_stat(struct ll_sb_info *sbi)
index 8136658..fe81fa2 100644
 /** records that a write is in flight */
 void vvp_write_pending(struct ccc_object *club, struct ccc_page *page)
 {
-        struct ll_inode_info *lli = ll_i2info(club->cob_inode);
-
-        ENTRY;
-        cfs_spin_lock(&lli->lli_lock);
-        lli->lli_flags |= LLIF_SOM_DIRTY;
-        if (page != NULL && cfs_list_empty(&page->cpg_pending_linkage))
-                cfs_list_add(&page->cpg_pending_linkage,
-                             &club->cob_pending_list);
-        cfs_spin_unlock(&lli->lli_lock);
-        EXIT;
+       struct ll_inode_info *lli = ll_i2info(club->cob_inode);
+
+       ENTRY;
+       spin_lock(&lli->lli_lock);
+       lli->lli_flags |= LLIF_SOM_DIRTY;
+       if (page != NULL && cfs_list_empty(&page->cpg_pending_linkage))
+               cfs_list_add(&page->cpg_pending_linkage,
+                            &club->cob_pending_list);
+       spin_unlock(&lli->lli_lock);
+       EXIT;
 }
 
 /** records that a write has completed */
 void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
 {
-        struct ll_inode_info *lli = ll_i2info(club->cob_inode);
-        int rc = 0;
-
-        ENTRY;
-        cfs_spin_lock(&lli->lli_lock);
-        if (page != NULL && !cfs_list_empty(&page->cpg_pending_linkage)) {
-                cfs_list_del_init(&page->cpg_pending_linkage);
-                rc = 1;
-        }
-        cfs_spin_unlock(&lli->lli_lock);
-        if (rc)
-                ll_queue_done_writing(club->cob_inode, 0);
-        EXIT;
+       struct ll_inode_info *lli = ll_i2info(club->cob_inode);
+       int rc = 0;
+
+       ENTRY;
+       spin_lock(&lli->lli_lock);
+       if (page != NULL && !cfs_list_empty(&page->cpg_pending_linkage)) {
+               cfs_list_del_init(&page->cpg_pending_linkage);
+               rc = 1;
+       }
+       spin_unlock(&lli->lli_lock);
+       if (rc)
+               ll_queue_done_writing(club->cob_inode, 0);
+       EXIT;
 }
 
 /** Queues DONE_WRITING if
@@ -83,11 +83,11 @@ void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
  * - inode has no dirty pages; */
 void ll_queue_done_writing(struct inode *inode, unsigned long flags)
 {
-        struct ll_inode_info *lli = ll_i2info(inode);
-        struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
-        ENTRY;
+       struct ll_inode_info *lli = ll_i2info(inode);
+       struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
+       ENTRY;
 
-        cfs_spin_lock(&lli->lli_lock);
+       spin_lock(&lli->lli_lock);
         lli->lli_flags |= flags;
 
         if ((lli->lli_flags & LLIF_DONE_WRITING) &&
@@ -100,7 +100,7 @@ void ll_queue_done_writing(struct inode *inode, unsigned long flags)
                               inode->i_ino, inode->i_generation,
                               lli->lli_flags);
                 /* DONE_WRITING is allowed and inode has no dirty page. */
-                cfs_spin_lock(&lcq->lcq_lock);
+               spin_lock(&lcq->lcq_lock);
 
                 LASSERT(cfs_list_empty(&lli->lli_close_list));
                 CDEBUG(D_INODE, "adding inode %lu/%u to close list\n",
@@ -116,10 +116,10 @@ void ll_queue_done_writing(struct inode *inode, unsigned long flags)
                 lli->lli_flags &= ~LLIF_DONE_WRITING;
 
                 cfs_waitq_signal(&lcq->lcq_waitq);
-                cfs_spin_unlock(&lcq->lcq_lock);
-        }
-        cfs_spin_unlock(&lli->lli_lock);
-        EXIT;
+               spin_unlock(&lcq->lcq_lock);
+       }
+       spin_unlock(&lli->lli_lock);
+       EXIT;
 }
 
 /** Pack SOM attributes info @opdata for CLOSE, DONE_WRITING rpc. */
@@ -145,22 +145,22 @@ void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
 
 /** Closes ioepoch and packs Size-on-MDS attribute if needed into @op_data. */
 void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
-                      struct obd_client_handle **och, unsigned long flags)
+                     struct obd_client_handle **och, unsigned long flags)
 {
-        struct ll_inode_info *lli = ll_i2info(inode);
-        struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
-        ENTRY;
-
-        cfs_spin_lock(&lli->lli_lock);
-        if (!(cfs_list_empty(&club->cob_pending_list))) {
-                if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
-                        LASSERT(*och != NULL);
-                        LASSERT(lli->lli_pending_och == NULL);
-                        /* Inode is dirty and there is no pending write done
-                         * request yet, DONE_WRITE is to be sent later. */
-                        lli->lli_flags |= LLIF_EPOCH_PENDING;
-                        lli->lli_pending_och = *och;
-                        cfs_spin_unlock(&lli->lli_lock);
+       struct ll_inode_info *lli = ll_i2info(inode);
+       struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
+       ENTRY;
+
+       spin_lock(&lli->lli_lock);
+       if (!(cfs_list_empty(&club->cob_pending_list))) {
+               if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
+                       LASSERT(*och != NULL);
+                       LASSERT(lli->lli_pending_och == NULL);
+                       /* Inode is dirty and there is no pending write-done
+                        * request yet; DONE_WRITE will be sent later. */
+                       lli->lli_flags |= LLIF_EPOCH_PENDING;
+                       lli->lli_pending_och = *och;
+                       spin_unlock(&lli->lli_lock);
 
                         inode = igrab(inode);
                         LASSERT(inode);
@@ -172,7 +172,7 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
                          * and try DONE_WRITE again later. */
                         LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
                         lli->lli_flags |= LLIF_DONE_WRITING;
-                        cfs_spin_unlock(&lli->lli_lock);
+                       spin_unlock(&lli->lli_lock);
 
                         inode = igrab(inode);
                         LASSERT(inode);
@@ -192,26 +192,26 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
         } else {
                 /* Pack Size-on-MDS inode attributes only if they have changed */
                 if (!(lli->lli_flags & LLIF_SOM_DIRTY)) {
-                        cfs_spin_unlock(&lli->lli_lock);
-                        GOTO(out, 0);
-                }
-
-                /* There is a pending DONE_WRITE -- close epoch with no
-                 * attribute change. */
-                if (lli->lli_flags & LLIF_EPOCH_PENDING) {
-                        cfs_spin_unlock(&lli->lli_lock);
-                        GOTO(out, 0);
-                }
-        }
-
-        LASSERT(cfs_list_empty(&club->cob_pending_list));
-        lli->lli_flags &= ~LLIF_SOM_DIRTY;
-        cfs_spin_unlock(&lli->lli_lock);
-        ll_done_writing_attr(inode, op_data);
-
-        EXIT;
+                       spin_unlock(&lli->lli_lock);
+                       GOTO(out, 0);
+               }
+
+               /* There is a pending DONE_WRITE -- close epoch with no
+                * attribute change. */
+               if (lli->lli_flags & LLIF_EPOCH_PENDING) {
+                       spin_unlock(&lli->lli_lock);
+                       GOTO(out, 0);
+               }
+       }
+
+       LASSERT(cfs_list_empty(&club->cob_pending_list));
+       lli->lli_flags &= ~LLIF_SOM_DIRTY;
+       spin_unlock(&lli->lli_lock);
+       ll_done_writing_attr(inode, op_data);
+
+       EXIT;
 out:
-        return;
+       return;
 }
 
 /**
@@ -329,9 +329,9 @@ out:
 
 static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
 {
-        struct ll_inode_info *lli = NULL;
+       struct ll_inode_info *lli = NULL;
 
-        cfs_spin_lock(&lcq->lcq_lock);
+       spin_lock(&lcq->lcq_lock);
 
         if (!cfs_list_empty(&lcq->lcq_head)) {
                 lli = cfs_list_entry(lcq->lcq_head.next, struct ll_inode_info,
@@ -340,8 +340,8 @@ static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
         } else if (cfs_atomic_read(&lcq->lcq_stop))
                 lli = ERR_PTR(-EALREADY);
 
-        cfs_spin_unlock(&lcq->lcq_lock);
-        return lli;
+       spin_unlock(&lcq->lcq_lock);
+       return lli;
 }
 
 static int ll_close_thread(void *arg)
@@ -355,7 +355,7 @@ static int ll_close_thread(void *arg)
                 cfs_daemonize(name);
         }
 
-        cfs_complete(&lcq->lcq_comp);
+       complete(&lcq->lcq_comp);
 
         while (1) {
                 struct l_wait_info lwi = { 0 };
@@ -376,8 +376,8 @@ static int ll_close_thread(void *arg)
         }
 
         CDEBUG(D_INFO, "ll_close exiting\n");
-        cfs_complete(&lcq->lcq_comp);
-        RETURN(0);
+       complete(&lcq->lcq_comp);
+       RETURN(0);
 }
 
 int ll_close_thread_start(struct ll_close_queue **lcq_ret)
@@ -392,27 +392,27 @@ int ll_close_thread_start(struct ll_close_queue **lcq_ret)
         if (lcq == NULL)
                 return -ENOMEM;
 
-        cfs_spin_lock_init(&lcq->lcq_lock);
-        CFS_INIT_LIST_HEAD(&lcq->lcq_head);
-        cfs_waitq_init(&lcq->lcq_waitq);
-        cfs_init_completion(&lcq->lcq_comp);
+       spin_lock_init(&lcq->lcq_lock);
+       CFS_INIT_LIST_HEAD(&lcq->lcq_head);
+       cfs_waitq_init(&lcq->lcq_waitq);
+       init_completion(&lcq->lcq_comp);
 
-        pid = cfs_create_thread(ll_close_thread, lcq, 0);
-        if (pid < 0) {
-                OBD_FREE(lcq, sizeof(*lcq));
-                return pid;
-        }
+       pid = cfs_create_thread(ll_close_thread, lcq, 0);
+       if (pid < 0) {
+               OBD_FREE(lcq, sizeof(*lcq));
+               return pid;
+       }
 
-        cfs_wait_for_completion(&lcq->lcq_comp);
-        *lcq_ret = lcq;
-        return 0;
+       wait_for_completion(&lcq->lcq_comp);
+       *lcq_ret = lcq;
+       return 0;
 }
 
 void ll_close_thread_shutdown(struct ll_close_queue *lcq)
 {
-        cfs_init_completion(&lcq->lcq_comp);
-        cfs_atomic_inc(&lcq->lcq_stop);
-        cfs_waitq_signal(&lcq->lcq_waitq);
-        cfs_wait_for_completion(&lcq->lcq_comp);
-        OBD_FREE(lcq, sizeof(*lcq));
+       init_completion(&lcq->lcq_comp);
+       cfs_atomic_inc(&lcq->lcq_stop);
+       cfs_waitq_signal(&lcq->lcq_waitq);
+       wait_for_completion(&lcq->lcq_comp);
+       OBD_FREE(lcq, sizeof(*lcq));
 }
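
ll_close_thread_start()/ll_close_thread_shutdown() above reuse a single completion for both handshakes: the starter waits until the thread signals that it is running, and shutdown re-arms the same completion with init_completion() before waking the thread and waiting for its final complete(). The shape, with the thread-creation and stop-flag details elided (names hypothetical):

    #include <linux/completion.h>

    static struct completion lcq_comp;

    /* thread side */
    static void worker_announce(void) { complete(&lcq_comp); } /* "running" */
    static void worker_goodbye(void)  { complete(&lcq_comp); } /* "exiting" */

    /* control side */
    static int queue_start(void)
    {
            init_completion(&lcq_comp);
            /* create the thread (elided); it calls worker_announce() */
            wait_for_completion(&lcq_comp);
            return 0;
    }

    static void queue_shutdown(void)
    {
            init_completion(&lcq_comp);     /* re-arm before asking it to stop */
            /* set the stop flag and wake the thread (elided) */
            wait_for_completion(&lcq_comp); /* thread calls worker_goodbye() */
    }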
index b4eb309..2623e38 100644
@@ -128,15 +128,15 @@ enum lli_flags {
 };
 
 struct ll_inode_info {
-        __u32                           lli_inode_magic;
-        __u32                           lli_flags;
-        __u64                           lli_ioepoch;
+       __u32                           lli_inode_magic;
+       __u32                           lli_flags;
+       __u64                           lli_ioepoch;
 
-        cfs_spinlock_t                  lli_lock;
-        struct posix_acl               *lli_posix_acl;
+       spinlock_t                      lli_lock;
+       struct posix_acl                *lli_posix_acl;
 
-        cfs_hlist_head_t               *lli_remote_perms;
-        cfs_mutex_t                     lli_rmtperm_mutex;
+       cfs_hlist_head_t                *lli_remote_perms;
+       struct mutex                    lli_rmtperm_mutex;
 
         /* identifying fields for both metadata and data stacks. */
         struct lu_fid                   lli_fid;
@@ -166,21 +166,21 @@ struct ll_inode_info {
         __u64                           lli_open_fd_write_count;
         __u64                           lli_open_fd_exec_count;
         /* Protects access to och pointers and their usage counters */
-        cfs_mutex_t                     lli_och_mutex;
+       struct mutex                    lli_och_mutex;
 
-        struct inode                    lli_vfs_inode;
+       struct inode                    lli_vfs_inode;
 
-        /* the most recent timestamps obtained from mds */
-        struct ost_lvb                  lli_lvb;
-        cfs_spinlock_t                  lli_agl_lock;
+       /* the most recent timestamps obtained from mds */
+       struct ost_lvb                  lli_lvb;
+       spinlock_t                      lli_agl_lock;
 
-        /* Try to make the d::member and f::member are aligned. Before using
-         * these members, make clear whether it is directory or not. */
-        union {
-                /* for directory */
-                struct {
-                        /* serialize normal readdir and statahead-readdir. */
-                        cfs_mutex_t                     d_readdir_mutex;
+       /* Try to keep the d::member and f::member fields aligned. Before
+        * using these members, determine whether the inode is a directory. */
+       union {
+               /* for directory */
+               struct {
+                       /* serialize normal readdir and statahead-readdir. */
+                       struct mutex                    d_readdir_mutex;
 
                         /* metadata statahead */
                         /* since parent-child threads can share the same @file
@@ -191,11 +191,11 @@ struct ll_inode_info {
                         struct ll_statahead_info       *d_sai;
                         struct posix_acl               *d_def_acl;
                         /* protect statahead stuff. */
-                        cfs_spinlock_t                  d_sa_lock;
-                        /* "opendir_pid" is the token when lookup/revalid
-                         * -- I am the owner of dir statahead. */
-                        pid_t                           d_opendir_pid;
-                } d;
+                       spinlock_t                      d_sa_lock;
+                       /* "opendir_pid" is the token when lookup/revalid
+                        * -- I am the owner of dir statahead. */
+                       pid_t                           d_opendir_pid;
+               } d;
 
 #define lli_readdir_mutex       u.d.d_readdir_mutex
 #define lli_opendir_key         u.d.d_opendir_key
@@ -204,29 +204,29 @@ struct ll_inode_info {
 #define lli_sa_lock             u.d.d_sa_lock
 #define lli_opendir_pid         u.d.d_opendir_pid
 
-                /* for non-directory */
-                struct {
-                        cfs_semaphore_t                 f_size_sem;
-                        void                           *f_size_sem_owner;
-                        char                           *f_symlink_name;
-                        __u64                           f_maxbytes;
-                        /*
-                         * cfs_rw_semaphore_t {
-                         *    signed long      count;     // align u.d.d_def_acl
-                         *    cfs_spinlock_t   wait_lock; // align u.d.d_sa_lock
-                         *    struct list_head wait_list;
-                         * }
-                         */
-                        cfs_rw_semaphore_t              f_trunc_sem;
-                        cfs_mutex_t                     f_write_mutex;
+               /* for non-directory */
+               struct {
+                       struct semaphore                f_size_sem;
+                       void                            *f_size_sem_owner;
+                       char                            *f_symlink_name;
+                       __u64                           f_maxbytes;
+                       /*
+                        * struct rw_semaphore {
+                        *    signed long       count;     // align d.d_def_acl
+                        *    spinlock_t        wait_lock; // align d.d_sa_lock
+                        *    struct list_head wait_list;
+                        * }
+                        */
+                       struct rw_semaphore             f_trunc_sem;
+                       struct mutex                    f_write_mutex;
 
-                       cfs_rw_semaphore_t              f_glimpse_sem;
+                       struct rw_semaphore             f_glimpse_sem;
                        cfs_time_t                      f_glimpse_time;
                        cfs_list_t                      f_agl_list;
                        __u64                           f_agl_index;
 
-                        /* for writepage() only to communicate to fsync */
-                        int                            f_async_rc;
+                       /* for writepage() only to communicate to fsync */
+                       int                             f_async_rc;
 
                        /*
                        * whenever a process tries to read/write the file, the
@@ -268,7 +268,7 @@ struct ll_inode_info {
        struct cl_object               *lli_clob;
 
        /* mutex to request for layout lock exclusively. */
-       cfs_mutex_t                     lli_layout_mutex;
+       struct mutex                    lli_layout_mutex;
        /* valid only inside LAYOUT ibits lock, protected by lli_layout_mutex */
        __u32                           lli_layout_gen;
 };
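
The ll_inode_info hunks above are representative of the whole patch: each cfs_-prefixed primitive becomes its native Linux type (cfs_spinlock_t becomes spinlock_t, cfs_mutex_t becomes struct mutex, cfs_rw_semaphore_t becomes struct rw_semaphore), while the union-plus-#define aliasing that lets directory and regular-file state share storage is untouched. A minimal sketch of that aliasing pattern, with illustrative names (example_info, ei_*) that are not from the Lustre tree:

    #include <linux/spinlock.h>
    #include <linux/mutex.h>

    struct example_info {
            union {
                    struct {                        /* directory state */
                            struct mutex    d_readdir_mutex;
                            spinlock_t      d_sa_lock;
                    } d;
                    struct {                        /* regular-file state */
                            struct mutex    f_write_mutex;
                            spinlock_t      f_glimpse_lock;
                    } f;
            } u;
    };

    /* aliases keep call sites oblivious to the union, as lli_sa_lock does */
    #define ei_sa_lock      u.d.d_sa_lock
    #define ei_write_mutex  u.f.f_write_mutex
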
@@ -408,8 +408,8 @@ struct rmtacl_ctl_entry {
 };
 
 struct rmtacl_ctl_table {
-        cfs_spinlock_t   rct_lock;
-        cfs_list_t       rct_entries[RCE_HASHES];
+       spinlock_t      rct_lock;
+       cfs_list_t      rct_entries[RCE_HASHES];
 };
 
 #define EE_HASHES       32
@@ -423,17 +423,17 @@ struct eacl_entry {
 };
 
 struct eacl_table {
-        cfs_spinlock_t   et_lock;
-        cfs_list_t       et_entries[EE_HASHES];
+       spinlock_t      et_lock;
+       cfs_list_t      et_entries[EE_HASHES];
 };
 
 struct ll_sb_info {
-        cfs_list_t                ll_list;
-        /* this protects pglist and ra_info.  It isn't safe to
-         * grab from interrupt contexts */
-        cfs_spinlock_t            ll_lock;
-        cfs_spinlock_t            ll_pp_extent_lock; /* Lock for pp_extent entries */
-        cfs_spinlock_t            ll_process_lock; /* Lock for ll_rw_process_info */
+       cfs_list_t                ll_list;
+       /* this protects pglist and ra_info.  It isn't safe to
+        * grab from interrupt contexts */
+       spinlock_t                ll_lock;
+       spinlock_t                ll_pp_extent_lock; /* pp_extent entries */
+       spinlock_t                ll_process_lock;   /* ll_rw_process_info */
         struct obd_uuid           ll_sb_uuid;
         struct obd_export        *ll_md_exp;
         struct obd_export        *ll_dt_exp;
@@ -502,7 +502,7 @@ struct ll_ra_read {
  * per file-descriptor read-ahead data.
  */
 struct ll_readahead_state {
-        cfs_spinlock_t  ras_lock;
+       spinlock_t  ras_lock;
         /*
          * index of the last page that read(2) needed and that wasn't in the
          * cache. Used by ras_update() to detect seeks.
@@ -601,7 +601,7 @@ struct ll_file_data {
 
 struct lov_stripe_md;
 
-extern cfs_spinlock_t inode_lock;
+extern spinlock_t inode_lock;
 
 extern struct proc_dir_entry *proc_lustre_fs_root;
 
@@ -855,11 +855,11 @@ extern struct inode_operations ll_fast_symlink_inode_operations;
 
 /* llite/llite_close.c */
 struct ll_close_queue {
-        cfs_spinlock_t          lcq_lock;
-        cfs_list_t              lcq_head;
-        cfs_waitq_t             lcq_waitq;
-        cfs_completion_t        lcq_comp;
-        cfs_atomic_t            lcq_stop;
+       spinlock_t              lcq_lock;
+       cfs_list_t              lcq_head;
+       cfs_waitq_t             lcq_waitq;
+       struct completion       lcq_comp;
+       cfs_atomic_t            lcq_stop;
 };
 
 struct ccc_object *cl_inode2ccc(struct inode *inode);
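
lcq_comp becomes a plain struct completion here. A completion pairs a sleeper with a wake-up: it is initialized once, the waiter blocks in wait_for_completion(), and the signaller wakes it with complete(). A minimal sketch of the start-up handshake a queue like this typically uses, with illustrative names (work_done, worker_fn) rather than the lcq_* symbols:

    #include <linux/completion.h>
    #include <linux/kthread.h>

    static struct completion work_done;

    static int worker_fn(void *arg)
    {
            /* ... set up thread state ... */
            complete(&work_done);           /* wake the waiter exactly once */
            return 0;
    }

    static void run_and_wait(void)
    {
            init_completion(&work_done);
            kthread_run(worker_fn, NULL, "worker");
            wait_for_completion(&work_done);        /* sleep until complete() */
    }
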
@@ -1244,8 +1244,8 @@ struct ll_statahead_info {
         cfs_list_t              sai_entries_stated;   /* entries stated */
         cfs_list_t              sai_entries_agl; /* AGL entries to be sent */
         cfs_list_t              sai_cache[LL_SA_CACHE_SIZE];
-        cfs_spinlock_t          sai_cache_lock[LL_SA_CACHE_SIZE];
-        cfs_atomic_t            sai_cache_count;      /* entry count in cache */
+       spinlock_t              sai_cache_lock[LL_SA_CACHE_SIZE];
+       cfs_atomic_t            sai_cache_count; /* entry count in cache */
 };
 
 int do_statahead_enter(struct inode *dir, struct dentry **dentry,
@@ -1254,14 +1254,14 @@ void ll_stop_statahead(struct inode *dir, void *key);
 
 static inline int ll_glimpse_size(struct inode *inode)
 {
-        struct ll_inode_info *lli = ll_i2info(inode);
-        int rc;
-
-        cfs_down_read(&lli->lli_glimpse_sem);
-        rc = cl_glimpse_size(inode);
-        lli->lli_glimpse_time = cfs_time_current();
-        cfs_up_read(&lli->lli_glimpse_sem);
-        return rc;
+       struct ll_inode_info *lli = ll_i2info(inode);
+       int rc;
+
+       down_read(&lli->lli_glimpse_sem);
+       rc = cl_glimpse_size(inode);
+       lli->lli_glimpse_time = cfs_time_current();
+       up_read(&lli->lli_glimpse_sem);
+       return rc;
 }
 
 static inline void
index 2d8665b..016096f 100644
@@ -68,7 +68,7 @@ extern struct address_space_operations_ext ll_aops;
 #endif
 
 #ifndef log2
-#define log2(n) cfs_ffz(~(n))
+#define log2(n) ffz(~(n))
 #endif
 
 static struct ll_sb_info *ll_init_sbi(void)
@@ -85,10 +85,10 @@ static struct ll_sb_info *ll_init_sbi(void)
         if (!sbi)
                 RETURN(NULL);
 
-        cfs_spin_lock_init(&sbi->ll_lock);
-        cfs_mutex_init(&sbi->ll_lco.lco_lock);
-        cfs_spin_lock_init(&sbi->ll_pp_extent_lock);
-        cfs_spin_lock_init(&sbi->ll_process_lock);
+       spin_lock_init(&sbi->ll_lock);
+       mutex_init(&sbi->ll_lco.lco_lock);
+       spin_lock_init(&sbi->ll_pp_extent_lock);
+       spin_lock_init(&sbi->ll_process_lock);
         sbi->ll_rw_stats_on = 0;
 
         si_meminfo(&si);
@@ -107,7 +107,7 @@ static struct ll_sb_info *ll_init_sbi(void)
        cfs_atomic_set(&sbi->ll_cache.ccc_users, 0);
        sbi->ll_cache.ccc_lru_max = lru_page_max;
        cfs_atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
-       cfs_spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
+       spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
        CFS_INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
 
         sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
@@ -122,9 +122,9 @@ static struct ll_sb_info *ll_init_sbi(void)
         class_uuid_unparse(uuid, &sbi->ll_sb_uuid);
         CDEBUG(D_CONFIG, "generated uuid: %s\n", sbi->ll_sb_uuid.uuid);
 
-        cfs_spin_lock(&ll_sb_lock);
-        cfs_list_add_tail(&sbi->ll_list, &ll_super_blocks);
-        cfs_spin_unlock(&ll_sb_lock);
+       spin_lock(&ll_sb_lock);
+       cfs_list_add_tail(&sbi->ll_list, &ll_super_blocks);
+       spin_unlock(&ll_sb_lock);
 
         sbi->ll_flags |= LL_SBI_VERBOSE;
 #ifdef ENABLE_CHECKSUM
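
ll_sb_lock/ll_super_blocks is the classic global-registry pattern: every superblock is linked onto one list under a spinlock here and unlinked under the same lock in ll_free_sbi below. Only the lock calls change in this patch; the list wrappers (cfs_list_*) are left for a separate cleanup. A self-contained sketch of the pattern with illustrative names (all_sbs, sbi_register):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    static LIST_HEAD(all_sbs);              /* stands in for ll_super_blocks */
    static DEFINE_SPINLOCK(all_sbs_lock);   /* stands in for ll_sb_lock */

    struct sbi { struct list_head s_list; };

    static void sbi_register(struct sbi *sbi)
    {
            spin_lock(&all_sbs_lock);
            list_add_tail(&sbi->s_list, &all_sbs);
            spin_unlock(&all_sbs_lock);
    }

    static void sbi_unregister(struct sbi *sbi)
    {
            spin_lock(&all_sbs_lock);
            list_del(&sbi->s_list);
            spin_unlock(&all_sbs_lock);
    }
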
@@ -136,10 +136,10 @@ static struct ll_sb_info *ll_init_sbi(void)
 #endif
 
         for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
-                cfs_spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i]. \
-                                   pp_r_hist.oh_lock);
-                cfs_spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i]. \
-                                   pp_w_hist.oh_lock);
+               spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
+                              pp_r_hist.oh_lock);
+               spin_lock_init(&sbi->ll_rw_extents_info.pp_extents[i].
+                              pp_w_hist.oh_lock);
         }
 
         /* metadata statahead is enabled by default */
@@ -154,16 +154,16 @@ static struct ll_sb_info *ll_init_sbi(void)
 
 void ll_free_sbi(struct super_block *sb)
 {
-        struct ll_sb_info *sbi = ll_s2sbi(sb);
-        ENTRY;
+       struct ll_sb_info *sbi = ll_s2sbi(sb);
+       ENTRY;
 
-        if (sbi != NULL) {
-                cfs_spin_lock(&ll_sb_lock);
-                cfs_list_del(&sbi->ll_list);
-                cfs_spin_unlock(&ll_sb_lock);
-                OBD_FREE(sbi, sizeof(*sbi));
-        }
-        EXIT;
+       if (sbi != NULL) {
+               spin_lock(&ll_sb_lock);
+               cfs_list_del(&sbi->ll_list);
+               spin_unlock(&ll_sb_lock);
+               OBD_FREE(sbi, sizeof(*sbi));
+       }
+       EXIT;
 }
 
 static struct dentry_operations ll_d_root_ops = {
@@ -456,11 +456,11 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
                 GOTO(out_dt, err);
         }
 
-        cfs_mutex_lock(&sbi->ll_lco.lco_lock);
+       mutex_lock(&sbi->ll_lco.lco_lock);
         sbi->ll_lco.lco_flags = data->ocd_connect_flags;
         sbi->ll_lco.lco_md_exp = sbi->ll_md_exp;
         sbi->ll_lco.lco_dt_exp = sbi->ll_dt_exp;
-        cfs_mutex_unlock(&sbi->ll_lco.lco_lock);
+       mutex_unlock(&sbi->ll_lco.lco_lock);
 
         fid_zero(&sbi->ll_root_fid);
         err = md_getstatus(sbi->ll_md_exp, &sbi->ll_root_fid, &oc);
@@ -887,14 +887,14 @@ next:
 
 void ll_lli_init(struct ll_inode_info *lli)
 {
-        lli->lli_inode_magic = LLI_INODE_MAGIC;
-        lli->lli_flags = 0;
-        lli->lli_ioepoch = 0;
-        lli->lli_maxbytes = MAX_LFS_FILESIZE;
-        cfs_spin_lock_init(&lli->lli_lock);
-        lli->lli_posix_acl = NULL;
-        lli->lli_remote_perms = NULL;
-        cfs_mutex_init(&lli->lli_rmtperm_mutex);
+       lli->lli_inode_magic = LLI_INODE_MAGIC;
+       lli->lli_flags = 0;
+       lli->lli_ioepoch = 0;
+       lli->lli_maxbytes = MAX_LFS_FILESIZE;
+       spin_lock_init(&lli->lli_lock);
+       lli->lli_posix_acl = NULL;
+       lli->lli_remote_perms = NULL;
+       mutex_init(&lli->lli_rmtperm_mutex);
         /* Do not set lli_fid, it has been initialized already. */
         fid_zero(&lli->lli_pfid);
         CFS_INIT_LIST_HEAD(&lli->lli_close_list);
@@ -909,32 +909,32 @@ void ll_lli_init(struct ll_inode_info *lli)
         lli->lli_open_fd_read_count = 0;
         lli->lli_open_fd_write_count = 0;
         lli->lli_open_fd_exec_count = 0;
-        cfs_mutex_init(&lli->lli_och_mutex);
-        cfs_spin_lock_init(&lli->lli_agl_lock);
+       mutex_init(&lli->lli_och_mutex);
+       spin_lock_init(&lli->lli_agl_lock);
        lli->lli_has_smd = false;
-        lli->lli_clob = NULL;
-
-        LASSERT(lli->lli_vfs_inode.i_mode != 0);
-        if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
-                cfs_mutex_init(&lli->lli_readdir_mutex);
-                lli->lli_opendir_key = NULL;
-                lli->lli_sai = NULL;
-                lli->lli_def_acl = NULL;
-                cfs_spin_lock_init(&lli->lli_sa_lock);
-                lli->lli_opendir_pid = 0;
-        } else {
-                cfs_sema_init(&lli->lli_size_sem, 1);
-                lli->lli_size_sem_owner = NULL;
-                lli->lli_symlink_name = NULL;
-                cfs_init_rwsem(&lli->lli_trunc_sem);
-                cfs_mutex_init(&lli->lli_write_mutex);
-               cfs_init_rwsem(&lli->lli_glimpse_sem);
+       lli->lli_clob = NULL;
+
+       LASSERT(lli->lli_vfs_inode.i_mode != 0);
+       if (S_ISDIR(lli->lli_vfs_inode.i_mode)) {
+               mutex_init(&lli->lli_readdir_mutex);
+               lli->lli_opendir_key = NULL;
+               lli->lli_sai = NULL;
+               lli->lli_def_acl = NULL;
+               spin_lock_init(&lli->lli_sa_lock);
+               lli->lli_opendir_pid = 0;
+       } else {
+               sema_init(&lli->lli_size_sem, 1);
+               lli->lli_size_sem_owner = NULL;
+               lli->lli_symlink_name = NULL;
+               init_rwsem(&lli->lli_trunc_sem);
+               mutex_init(&lli->lli_write_mutex);
+               init_rwsem(&lli->lli_glimpse_sem);
                lli->lli_glimpse_time = 0;
                CFS_INIT_LIST_HEAD(&lli->lli_agl_list);
                lli->lli_agl_index = 0;
                lli->lli_async_rc = 0;
        }
-       cfs_mutex_init(&lli->lli_layout_mutex);
+       mutex_init(&lli->lli_layout_mutex);
 }
 
 static inline int ll_bdi_register(struct backing_dev_info *bdi)
@@ -1465,7 +1465,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr)
                if (ia_valid & ATTR_SIZE)
                        inode_dio_write_done(inode);
                mutex_unlock(&inode->i_mutex);
-               cfs_down_write(&lli->lli_trunc_sem);
+               down_write(&lli->lli_trunc_sem);
                mutex_lock(&inode->i_mutex);
                if (ia_valid & ATTR_SIZE)
                        inode_dio_wait(inode);
@@ -1520,7 +1520,7 @@ out:
                 ll_finish_md_op_data(op_data);
         }
         if (!S_ISDIR(inode->i_mode))
-                cfs_up_write(&lli->lli_trunc_sem);
+               up_write(&lli->lli_trunc_sem);
 
         ll_stats_ops_tally(ll_i2sbi(inode), (ia_valid & ATTR_SIZE) ?
                            LPROC_LL_TRUNC : LPROC_LL_SETATTR, 1);
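
lli_trunc_sem is a reader/writer semaphore: setattr takes it for write so a truncate runs alone, while page faults (see ll_page_mkwrite0 below) take it for read and may overlap one another freely. A minimal sketch of the two sides, with illustrative names:

    #include <linux/rwsem.h>

    static DECLARE_RWSEM(trunc_sem);        /* analogue of lli_trunc_sem */

    static void truncate_like_op(void)
    {
            down_write(&trunc_sem);         /* excludes readers and writers */
            /* ... shrink the file ... */
            up_write(&trunc_sem);
    }

    static void fault_like_op(void)
    {
            down_read(&trunc_sem);          /* many readers may hold this at once */
            /* ... touch pages truncate must not tear down ... */
            up_read(&trunc_sem);
    }
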
@@ -1652,7 +1652,7 @@ void ll_inode_size_lock(struct inode *inode)
 
         lli = ll_i2info(inode);
         LASSERT(lli->lli_size_sem_owner != current);
-        cfs_down(&lli->lli_size_sem);
+       down(&lli->lli_size_sem);
         LASSERT(lli->lli_size_sem_owner == NULL);
         lli->lli_size_sem_owner = current;
 }
@@ -1664,7 +1664,7 @@ void ll_inode_size_unlock(struct inode *inode)
         lli = ll_i2info(inode);
         LASSERT(lli->lli_size_sem_owner == current);
         lli->lli_size_sem_owner = NULL;
-        cfs_up(&lli->lli_size_sem);
+       up(&lli->lli_size_sem);
 }
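
lli_size_sem stays a counting semaphore (initialized to 1 in ll_lli_init above) rather than becoming a mutex: a semaphore records no owner, so the code keeps its own lli_size_sem_owner field for the LASSERTs, and lock and unlock are free to happen in different tasks. A sketch of the idiom under the native API, with illustrative names:

    #include <linux/sched.h>
    #include <linux/semaphore.h>

    static struct semaphore size_sem;       /* sema_init(&size_sem, 1) at setup */
    static void *size_sem_owner;

    static void size_lock(void)
    {
            down(&size_sem);                /* no owner recorded, unlike a mutex */
            size_sem_owner = current;       /* so track one by hand for asserts */
    }

    static void size_unlock(void)
    {
            size_sem_owner = NULL;
            up(&size_sem);                  /* may legally run in another task */
    }
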
 
 void ll_update_inode(struct inode *inode, struct lustre_md *md)
@@ -1698,13 +1698,13 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
                         ll_update_remote_perm(inode, md->remote_perm);
         }
 #ifdef CONFIG_FS_POSIX_ACL
-        else if (body->valid & OBD_MD_FLACL) {
-                cfs_spin_lock(&lli->lli_lock);
-                if (lli->lli_posix_acl)
-                        posix_acl_release(lli->lli_posix_acl);
-                lli->lli_posix_acl = md->posix_acl;
-                cfs_spin_unlock(&lli->lli_lock);
-        }
+       else if (body->valid & OBD_MD_FLACL) {
+               spin_lock(&lli->lli_lock);
+               if (lli->lli_posix_acl)
+                       posix_acl_release(lli->lli_posix_acl);
+               lli->lli_posix_acl = md->posix_acl;
+               spin_unlock(&lli->lli_lock);
+       }
 #endif
         inode->i_ino = cl_fid_build_ino(&body->fid1, 0);
         inode->i_generation = cl_fid_build_gen(&body->fid1);
@@ -2299,14 +2299,14 @@ struct md_op_data * ll_prep_md_op_data(struct md_op_data *op_data,
            !ll_i2info(i2)->lli_has_smd) {
                struct ll_inode_info *lli = ll_i2info(i2);
 
-               cfs_spin_lock(&lli->lli_lock);
+               spin_lock(&lli->lli_lock);
                if (likely(!lli->lli_has_smd && !fid_is_zero(&lli->lli_pfid)))
-                        op_data->op_fid1 = lli->lli_pfid;
-                cfs_spin_unlock(&lli->lli_lock);
-                /** We ignore parent's capability temporary. */
-        }
+                       op_data->op_fid1 = lli->lli_pfid;
+               spin_unlock(&lli->lli_lock);
+               /* We ignore the parent's capability temporarily. */
+       }
 
-        return op_data;
+       return op_data;
 }
 
 void ll_finish_md_op_data(struct md_op_data *op_data)
index 3a0c077..ba845d1 100644
@@ -211,11 +211,11 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
         * while truncate is on-going. */
        inode = ccc_object_inode(io->ci_obj);
        lli = ll_i2info(inode);
-       cfs_down_read(&lli->lli_trunc_sem);
+       down_read(&lli->lli_trunc_sem);
 
        result = cl_io_loop(env, io);
 
-       cfs_up_read(&lli->lli_trunc_sem);
+       up_read(&lli->lli_trunc_sem);
 
        cfs_restore_sigs(set);
 
index 1f18337..3a0ffef 100644
@@ -145,13 +145,13 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren
          * We have to find the parent to tell MDS how to init lov objects.
          */
        if (S_ISREG(inode->i_mode) && !ll_i2info(inode)->lli_has_smd &&
-            parent != NULL) {
-                struct ll_inode_info *lli = ll_i2info(inode);
+           parent != NULL) {
+               struct ll_inode_info *lli = ll_i2info(inode);
 
-                cfs_spin_lock(&lli->lli_lock);
-                lli->lli_pfid = *parent;
-                cfs_spin_unlock(&lli->lli_lock);
-        }
+               spin_lock(&lli->lli_lock);
+               lli->lli_pfid = *parent;
+               spin_unlock(&lli->lli_lock);
+       }
 
         result = d_obtain_alias(inode);
         if (IS_ERR(result))
@@ -232,9 +232,9 @@ static int ll_get_name(struct dentry *dentry, char *name,
         lgd.lgd_fid = ll_i2info(child->d_inode)->lli_fid;
         lgd.lgd_found = 0;
 
-        cfs_mutex_lock(&dir->i_mutex);
+       mutex_lock(&dir->i_mutex);
        rc = ll_dir_read(dir, &offset, &lgd, ll_nfs_get_name_filldir);
-        cfs_mutex_unlock(&dir->i_mutex);
+       mutex_unlock(&dir->i_mutex);
         if (!rc && !lgd.lgd_found)
                 rc = -ENOENT;
         EXIT;
index 2ff821e..87a0dd6 100644
@@ -109,12 +109,12 @@ static struct rmtacl_ctl_entry *__rct_search(struct rmtacl_ctl_table *rct,
 
 struct rmtacl_ctl_entry *rct_search(struct rmtacl_ctl_table *rct, pid_t key)
 {
-        struct rmtacl_ctl_entry *rce;
+       struct rmtacl_ctl_entry *rce;
 
-        cfs_spin_lock(&rct->rct_lock);
-        rce = __rct_search(rct, key);
-        cfs_spin_unlock(&rct->rct_lock);
-        return rce;
+       spin_lock(&rct->rct_lock);
+       rce = __rct_search(rct, key);
+       spin_unlock(&rct->rct_lock);
+       return rce;
 }
 
 int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops)
@@ -125,54 +125,54 @@ int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops)
         if (rce == NULL)
                 return -ENOMEM;
 
-        cfs_spin_lock(&rct->rct_lock);
-        e = __rct_search(rct, key);
-        if (unlikely(e != NULL)) {
-                CWARN("Unexpected stale rmtacl_entry found: "
-                      "[key: %d] [ops: %d]\n", (int)key, ops);
-                rce_free(e);
-        }
-        cfs_list_add_tail(&rce->rce_list, &rct->rct_entries[rce_hashfunc(key)]);
-        cfs_spin_unlock(&rct->rct_lock);
-
-        return 0;
+       spin_lock(&rct->rct_lock);
+       e = __rct_search(rct, key);
+       if (unlikely(e != NULL)) {
+               CWARN("Unexpected stale rmtacl_entry found: "
+                     "[key: %d] [ops: %d]\n", (int)key, ops);
+               rce_free(e);
+       }
+       cfs_list_add_tail(&rce->rce_list, &rct->rct_entries[rce_hashfunc(key)]);
+       spin_unlock(&rct->rct_lock);
+
+       return 0;
 }
 
 int rct_del(struct rmtacl_ctl_table *rct, pid_t key)
 {
-        struct rmtacl_ctl_entry *rce;
+       struct rmtacl_ctl_entry *rce;
 
-        cfs_spin_lock(&rct->rct_lock);
-        rce = __rct_search(rct, key);
-        if (rce)
-                rce_free(rce);
-        cfs_spin_unlock(&rct->rct_lock);
+       spin_lock(&rct->rct_lock);
+       rce = __rct_search(rct, key);
+       if (rce)
+               rce_free(rce);
+       spin_unlock(&rct->rct_lock);
 
-        return rce ? 0 : -ENOENT;
+       return rce ? 0 : -ENOENT;
 }
 
 void rct_init(struct rmtacl_ctl_table *rct)
 {
-        int i;
+       int i;
 
-        cfs_spin_lock_init(&rct->rct_lock);
-        for (i = 0; i < RCE_HASHES; i++)
-                CFS_INIT_LIST_HEAD(&rct->rct_entries[i]);
+       spin_lock_init(&rct->rct_lock);
+       for (i = 0; i < RCE_HASHES; i++)
+               CFS_INIT_LIST_HEAD(&rct->rct_entries[i]);
 }
 
 void rct_fini(struct rmtacl_ctl_table *rct)
 {
-        struct rmtacl_ctl_entry *rce;
-        int i;
-
-        cfs_spin_lock(&rct->rct_lock);
-        for (i = 0; i < RCE_HASHES; i++)
-                while (!cfs_list_empty(&rct->rct_entries[i])) {
-                        rce = cfs_list_entry(rct->rct_entries[i].next,
-                                             struct rmtacl_ctl_entry, rce_list);
-                        rce_free(rce);
-                }
-        cfs_spin_unlock(&rct->rct_lock);
+       struct rmtacl_ctl_entry *rce;
+       int i;
+
+       spin_lock(&rct->rct_lock);
+       for (i = 0; i < RCE_HASHES; i++)
+               while (!cfs_list_empty(&rct->rct_entries[i])) {
+                       rce = cfs_list_entry(rct->rct_entries[i].next,
+                                            struct rmtacl_ctl_entry, rce_list);
+                       rce_free(rce);
+               }
+       spin_unlock(&rct->rct_lock);
 }
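
The remote-ACL table uses the simplest locking layout: one spinlock (rct_lock) covering all RCE_HASHES bucket lists, taken around every search, insert, and teardown walk. A self-contained sketch of search under a single table lock, with illustrative names (tbl, tbl_search):

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define NBUCKETS 32                     /* stands in for RCE_HASHES */

    static DEFINE_SPINLOCK(tbl_lock);       /* one lock for the whole table */
    static struct list_head tbl[NBUCKETS];

    struct entry { struct list_head e_list; pid_t e_key; };

    static void tbl_init(void)
    {
            int i;

            for (i = 0; i < NBUCKETS; i++)
                    INIT_LIST_HEAD(&tbl[i]);
    }

    static struct entry *tbl_search(pid_t key)
    {
            struct entry *e;

            spin_lock(&tbl_lock);
            list_for_each_entry(e, &tbl[key % NBUCKETS], e_list) {
                    if (e->e_key == key) {
                            spin_unlock(&tbl_lock);
                            return e;
                    }
            }
            spin_unlock(&tbl_lock);
            return NULL;
    }
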
 
 
@@ -227,25 +227,25 @@ static struct eacl_entry *__et_search_del(struct eacl_table *et, pid_t key,
 struct eacl_entry *et_search_del(struct eacl_table *et, pid_t key,
                                  struct lu_fid *fid, int type)
 {
-        struct eacl_entry *ee;
+       struct eacl_entry *ee;
 
-        cfs_spin_lock(&et->et_lock);
-        ee = __et_search_del(et, key, fid, type);
-        cfs_spin_unlock(&et->et_lock);
-        return ee;
+       spin_lock(&et->et_lock);
+       ee = __et_search_del(et, key, fid, type);
+       spin_unlock(&et->et_lock);
+       return ee;
 }
 
 void et_search_free(struct eacl_table *et, pid_t key)
 {
-        struct eacl_entry *ee, *next;
-        cfs_list_t *head = &et->et_entries[ee_hashfunc(key)];
+       struct eacl_entry *ee, *next;
+       cfs_list_t *head = &et->et_entries[ee_hashfunc(key)];
 
-        cfs_spin_lock(&et->et_lock);
-        cfs_list_for_each_entry_safe(ee, next, head, ee_list)
-                if (ee->ee_key == key)
-                        ee_free(ee);
+       spin_lock(&et->et_lock);
+       cfs_list_for_each_entry_safe(ee, next, head, ee_list)
+               if (ee->ee_key == key)
+                       ee_free(ee);
 
-        cfs_spin_unlock(&et->et_lock);
+       spin_unlock(&et->et_lock);
 }
 
 int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
@@ -257,42 +257,42 @@ int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
         if (ee == NULL)
                 return -ENOMEM;
 
-        cfs_spin_lock(&et->et_lock);
-        e = __et_search_del(et, key, fid, type);
-        if (unlikely(e != NULL)) {
-                CWARN("Unexpected stale eacl_entry found: "
-                      "[key: %d] [fid: "DFID"] [type: %d]\n",
-                      (int)key, PFID(fid), type);
-                ee_free(e);
-        }
-        cfs_list_add_tail(&ee->ee_list, &et->et_entries[ee_hashfunc(key)]);
-        cfs_spin_unlock(&et->et_lock);
-
-        return 0;
+       spin_lock(&et->et_lock);
+       e = __et_search_del(et, key, fid, type);
+       if (unlikely(e != NULL)) {
+               CWARN("Unexpected stale eacl_entry found: "
+                     "[key: %d] [fid: "DFID"] [type: %d]\n",
+                     (int)key, PFID(fid), type);
+               ee_free(e);
+       }
+       cfs_list_add_tail(&ee->ee_list, &et->et_entries[ee_hashfunc(key)]);
+       spin_unlock(&et->et_lock);
+
+       return 0;
 }
 
 void et_init(struct eacl_table *et)
 {
-        int i;
+       int i;
 
-        cfs_spin_lock_init(&et->et_lock);
-        for (i = 0; i < EE_HASHES; i++)
-                CFS_INIT_LIST_HEAD(&et->et_entries[i]);
+       spin_lock_init(&et->et_lock);
+       for (i = 0; i < EE_HASHES; i++)
+               CFS_INIT_LIST_HEAD(&et->et_entries[i]);
 }
 
 void et_fini(struct eacl_table *et)
 {
-        struct eacl_entry *ee;
-        int i;
-
-        cfs_spin_lock(&et->et_lock);
-        for (i = 0; i < EE_HASHES; i++)
-                while (!cfs_list_empty(&et->et_entries[i])) {
-                        ee = cfs_list_entry(et->et_entries[i].next,
-                                            struct eacl_entry, ee_list);
-                        ee_free(ee);
-                }
-        cfs_spin_unlock(&et->et_lock);
+       struct eacl_entry *ee;
+       int i;
+
+       spin_lock(&et->et_lock);
+       for (i = 0; i < EE_HASHES; i++)
+               while (!cfs_list_empty(&et->et_entries[i])) {
+                       ee = cfs_list_entry(et->et_entries[i].next,
+                                           struct eacl_entry, ee_list);
+                       ee_free(ee);
+               }
+       spin_unlock(&et->et_lock);
 }
 
 #endif
index 59814e6..b9795e2 100644
@@ -132,12 +132,12 @@ struct lloop_device {
 
         int                  old_gfp_mask;
 
-        cfs_spinlock_t       lo_lock;
-        struct bio          *lo_bio;
-        struct bio          *lo_biotail;
-        int                  lo_state;
-        cfs_semaphore_t      lo_sem;
-        cfs_mutex_t          lo_ctl_mutex;
+       spinlock_t              lo_lock;
+       struct bio              *lo_bio;
+       struct bio              *lo_biotail;
+       int                     lo_state;
+       struct semaphore        lo_sem;
+       struct mutex            lo_ctl_mutex;
         cfs_atomic_t         lo_pending;
         cfs_waitq_t          lo_bh_wait;
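
lo_lock is taken two ways in this file: loop_add_bio uses spin_lock_irqsave() because its callers' interrupt state is unknown, while the loop thread uses plain spin_lock_irq() because interrupts are known to be enabled there. The patch keeps that distinction and only drops the cfs_ prefix. A minimal sketch of the two variants:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(q_lock);         /* analogue of lo_lock */

    /* callable from any context: save and restore the caller's IRQ state */
    static void from_anywhere(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&q_lock, flags);
            /* ... queue a bio ... */
            spin_unlock_irqrestore(&q_lock, flags);
    }

    /* process context with IRQs known enabled: the cheaper fixed variant */
    static void from_thread(void)
    {
            spin_lock_irq(&q_lock);
            /* ... drain the queue ... */
            spin_unlock_irq(&q_lock);       /* unconditionally re-enables IRQs */
    }
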
 
@@ -166,7 +166,7 @@ static int lloop_major;
 static int max_loop = MAX_LOOP_DEFAULT;
 static struct lloop_device *loop_dev;
 static struct gendisk **disks;
-static cfs_mutex_t lloop_mutex;
+static struct mutex lloop_mutex;
 static void *ll_iocontrol_magic = NULL;
 
 static loff_t get_loop_size(struct lloop_device *lo, struct file *file)
@@ -274,19 +274,19 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
  */
 static void loop_add_bio(struct lloop_device *lo, struct bio *bio)
 {
-        unsigned long flags;
-
-        cfs_spin_lock_irqsave(&lo->lo_lock, flags);
-        if (lo->lo_biotail) {
-                lo->lo_biotail->bi_next = bio;
-                lo->lo_biotail = bio;
-        } else
-                lo->lo_bio = lo->lo_biotail = bio;
-        cfs_spin_unlock_irqrestore(&lo->lo_lock, flags);
-
-        cfs_atomic_inc(&lo->lo_pending);
-        if (cfs_waitq_active(&lo->lo_bh_wait))
-                cfs_waitq_signal(&lo->lo_bh_wait);
+       unsigned long flags;
+
+       spin_lock_irqsave(&lo->lo_lock, flags);
+       if (lo->lo_biotail) {
+               lo->lo_biotail->bi_next = bio;
+               lo->lo_biotail = bio;
+       } else
+               lo->lo_bio = lo->lo_biotail = bio;
+       spin_unlock_irqrestore(&lo->lo_lock, flags);
+
+       cfs_atomic_inc(&lo->lo_pending);
+       if (cfs_waitq_active(&lo->lo_bh_wait))
+               cfs_waitq_signal(&lo->lo_bh_wait);
 }
 
 /*
@@ -294,18 +294,18 @@ static void loop_add_bio(struct lloop_device *lo, struct bio *bio)
  */
 static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
 {
-        struct bio *first;
-        struct bio **bio;
-        unsigned int count = 0;
-        unsigned int page_count = 0;
-        int rw;
-
-        cfs_spin_lock_irq(&lo->lo_lock);
-        first = lo->lo_bio;
-        if (unlikely(first == NULL)) {
-                cfs_spin_unlock_irq(&lo->lo_lock);
-                return 0;
-        }
+       struct bio *first;
+       struct bio **bio;
+       unsigned int count = 0;
+       unsigned int page_count = 0;
+       int rw;
+
+       spin_lock_irq(&lo->lo_lock);
+       first = lo->lo_bio;
+       if (unlikely(first == NULL)) {
+               spin_unlock_irq(&lo->lo_lock);
+               return 0;
+       }
 
         /* TODO: need to split the bio, too bad. */
         LASSERT(first->bi_vcnt <= LLOOP_MAX_SEGMENTS);
@@ -334,8 +334,8 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
                 lo->lo_bio = NULL;
         }
         *req = first;
-        cfs_spin_unlock_irq(&lo->lo_lock);
-        return count;
+       spin_unlock_irq(&lo->lo_lock);
+       return count;
 }
 
 static ll_mrf_ret
@@ -351,9 +351,9 @@ loop_make_request(struct request_queue *q, struct bio *old_bio)
         CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
                (unsigned long long)old_bio->bi_sector, old_bio->bi_size);
 
-        cfs_spin_lock_irq(&lo->lo_lock);
-        inactive = (lo->lo_state != LLOOP_BOUND);
-        cfs_spin_unlock_irq(&lo->lo_lock);
+       spin_lock_irq(&lo->lo_lock);
+       inactive = (lo->lo_state != LLOOP_BOUND);
+       spin_unlock_irq(&lo->lo_lock);
         if (inactive)
                 goto err;
 
@@ -438,15 +438,15 @@ static int loop_thread(void *data)
         /*
          * up sem, we are running
          */
-        cfs_up(&lo->lo_sem);
-
-        for (;;) {
-                cfs_wait_event(lo->lo_bh_wait, loop_active(lo));
-                if (!cfs_atomic_read(&lo->lo_pending)) {
-                        int exiting = 0;
-                        cfs_spin_lock_irq(&lo->lo_lock);
-                        exiting = (lo->lo_state == LLOOP_RUNDOWN);
-                        cfs_spin_unlock_irq(&lo->lo_lock);
+       up(&lo->lo_sem);
+
+       for (;;) {
+               cfs_wait_event(lo->lo_bh_wait, loop_active(lo));
+               if (!cfs_atomic_read(&lo->lo_pending)) {
+                       int exiting = 0;
+                       spin_lock_irq(&lo->lo_lock);
+                       exiting = (lo->lo_state == LLOOP_RUNDOWN);
+                       spin_unlock_irq(&lo->lo_lock);
                         if (exiting)
                                 break;
                 }
@@ -478,7 +478,7 @@ static int loop_thread(void *data)
         cl_env_put(env, &refcheck);
 
 out:
-        cfs_up(&lo->lo_sem);
+       up(&lo->lo_sem);
         return ret;
 }
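
lo_sem is a semaphore used as a handshake, not a lock: it is created with a count of 0 (sema_init(&lo->lo_sem, 0) in lloop_init below), the thread up()s it once when it starts and once when it exits, and loop_set_fd/loop_clr_fd each down() to wait for those events. A sketch of the idiom with illustrative names; kthread_run stands in here for the patch-era cfs_create_thread:

    #include <linux/kthread.h>
    #include <linux/semaphore.h>

    static struct semaphore ready;

    static int thread_fn(void *arg)
    {
            up(&ready);                     /* tell the spawner we are running */
            /* ... service loop until asked to stop ... */
            up(&ready);                     /* tell the stopper we are gone */
            return 0;
    }

    static void start_thread(void)
    {
            sema_init(&ready, 0);           /* count 0: down() blocks until up() */
            kthread_run(thread_fn, NULL, "lloop");
            down(&ready);                   /* wait for thread start-up */
    }

    static void stop_thread(void)
    {
            /* ... flag the thread to exit and wake it ... */
            down(&ready);                   /* wait for the exit-time up() */
    }
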
 
@@ -555,7 +555,7 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
         set_blocksize(bdev, lo->lo_blocksize);
 
         cfs_create_thread(loop_thread, lo, CLONE_KERNEL);
-        cfs_down(&lo->lo_sem);
+       down(&lo->lo_sem);
         return 0;
 
  out:
@@ -579,12 +579,12 @@ static int loop_clr_fd(struct lloop_device *lo, struct block_device *bdev,
         if (filp == NULL)
                 return -EINVAL;
 
-        cfs_spin_lock_irq(&lo->lo_lock);
-        lo->lo_state = LLOOP_RUNDOWN;
-        cfs_spin_unlock_irq(&lo->lo_lock);
-        cfs_waitq_signal(&lo->lo_bh_wait);
+       spin_lock_irq(&lo->lo_lock);
+       lo->lo_state = LLOOP_RUNDOWN;
+       spin_unlock_irq(&lo->lo_lock);
+       cfs_waitq_signal(&lo->lo_bh_wait);
 
-        cfs_down(&lo->lo_sem);
+       down(&lo->lo_sem);
         lo->lo_backing_file = NULL;
         lo->ioctl = NULL;
         lo->lo_device = NULL;
@@ -612,9 +612,9 @@ static int lo_open(struct inode *inode, struct file *file)
         struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
 #endif
 
-        cfs_mutex_lock(&lo->lo_ctl_mutex);
+       mutex_lock(&lo->lo_ctl_mutex);
         lo->lo_refcnt++;
-        cfs_mutex_unlock(&lo->lo_ctl_mutex);
+       mutex_unlock(&lo->lo_ctl_mutex);
 
         return 0;
 }
@@ -629,9 +629,9 @@ static int lo_release(struct inode *inode, struct file *file)
         struct lloop_device *lo = inode->i_bdev->bd_disk->private_data;
 #endif
 
-        cfs_mutex_lock(&lo->lo_ctl_mutex);
+       mutex_lock(&lo->lo_ctl_mutex);
         --lo->lo_refcnt;
-        cfs_mutex_unlock(&lo->lo_ctl_mutex);
+       mutex_unlock(&lo->lo_ctl_mutex);
 
         return 0;
 }
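
lo_ctl_mutex guards lo_refcnt across open and release. An atomic_t would do for a lone counter; keeping a mutex leaves room for the count to be read consistently alongside other lo_* state. A minimal sketch of the refcount half, with illustrative names:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(ctl_mutex);         /* analogue of lo_ctl_mutex */
    static int refcnt;

    static int dev_open(void)
    {
            mutex_lock(&ctl_mutex);
            refcnt++;
            mutex_unlock(&ctl_mutex);
            return 0;
    }

    static int dev_release(void)
    {
            mutex_lock(&ctl_mutex);
            --refcnt;
            mutex_unlock(&ctl_mutex);
            return 0;
    }
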
@@ -653,7 +653,7 @@ static int lo_ioctl(struct inode *inode, struct file *unused,
         int err = 0;
 #endif
 
-        cfs_mutex_lock(&lloop_mutex);
+       mutex_lock(&lloop_mutex);
         switch (cmd) {
         case LL_IOC_LLOOP_DETACH: {
                 err = loop_clr_fd(lo, bdev, 2);
@@ -682,7 +682,7 @@ static int lo_ioctl(struct inode *inode, struct file *unused,
                 err = -EINVAL;
                 break;
         }
-        cfs_mutex_unlock(&lloop_mutex);
+       mutex_unlock(&lloop_mutex);
 
         return err;
 }
@@ -718,7 +718,7 @@ static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
 
         CWARN("Enter llop_ioctl\n");
 
-        cfs_mutex_lock(&lloop_mutex);
+       mutex_lock(&lloop_mutex);
         switch (cmd) {
         case LL_IOC_LLOOP_ATTACH: {
                 struct lloop_device *lo_free = NULL;
@@ -788,7 +788,7 @@ static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
         }
 
 out:
-        cfs_mutex_unlock(&lloop_mutex);
+       mutex_unlock(&lloop_mutex);
 out1:
         if (rcp)
                 *rcp = err;
@@ -834,7 +834,7 @@ static int __init lloop_init(void)
                         goto out_mem3;
         }
 
-        cfs_mutex_init(&lloop_mutex);
+       mutex_init(&lloop_mutex);
 
         for (i = 0; i < max_loop; i++) {
                 struct lloop_device *lo = &loop_dev[i];
@@ -844,11 +844,11 @@ static int __init lloop_init(void)
                 if (!lo->lo_queue)
                         goto out_mem4;
 
-                cfs_mutex_init(&lo->lo_ctl_mutex);
-                cfs_sema_init(&lo->lo_sem, 0);
-                cfs_waitq_init(&lo->lo_bh_wait);
-                lo->lo_number = i;
-                cfs_spin_lock_init(&lo->lo_lock);
+               mutex_init(&lo->lo_ctl_mutex);
+               sema_init(&lo->lo_sem, 0);
+               cfs_waitq_init(&lo->lo_bh_wait);
+               lo->lo_number = i;
+               spin_lock_init(&lo->lo_lock);
                 disk->major = lloop_major;
                 disk->first_minor = i;
                 disk->fops = &lo_fops;
index 4a96147..c10ecb3 100644
@@ -234,17 +234,17 @@ static int ll_rd_site_stats(char *page, char **start, off_t off,
 static int ll_rd_max_readahead_mb(char *page, char **start, off_t off,
                                    int count, int *eof, void *data)
 {
-        struct super_block *sb = data;
-        struct ll_sb_info *sbi = ll_s2sbi(sb);
-        long pages_number;
-        int mult;
+       struct super_block *sb = data;
+       struct ll_sb_info *sbi = ll_s2sbi(sb);
+       long pages_number;
+       int mult;
 
-        cfs_spin_lock(&sbi->ll_lock);
-        pages_number = sbi->ll_ra_info.ra_max_pages;
-        cfs_spin_unlock(&sbi->ll_lock);
+       spin_lock(&sbi->ll_lock);
+       pages_number = sbi->ll_ra_info.ra_max_pages;
+       spin_unlock(&sbi->ll_lock);
 
-        mult = 1 << (20 - PAGE_CACHE_SHIFT);
-        return lprocfs_read_frac_helper(page, count, pages_number, mult);
+       mult = 1 << (20 - PAGE_CACHE_SHIFT);
+       return lprocfs_read_frac_helper(page, count, pages_number, mult);
 }
 
 static int ll_wr_max_readahead_mb(struct file *file, const char *buffer,
@@ -265,27 +265,27 @@ static int ll_wr_max_readahead_mb(struct file *file, const char *buffer,
                 return -ERANGE;
         }
 
-        cfs_spin_lock(&sbi->ll_lock);
-        sbi->ll_ra_info.ra_max_pages = pages_number;
-        cfs_spin_unlock(&sbi->ll_lock);
+       spin_lock(&sbi->ll_lock);
+       sbi->ll_ra_info.ra_max_pages = pages_number;
+       spin_unlock(&sbi->ll_lock);
 
-        return count;
+       return count;
 }
 
 static int ll_rd_max_readahead_per_file_mb(char *page, char **start, off_t off,
                                            int count, int *eof, void *data)
 {
-        struct super_block *sb = data;
-        struct ll_sb_info *sbi = ll_s2sbi(sb);
-        long pages_number;
-        int mult;
+       struct super_block *sb = data;
+       struct ll_sb_info *sbi = ll_s2sbi(sb);
+       long pages_number;
+       int mult;
 
-        cfs_spin_lock(&sbi->ll_lock);
-        pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
-        cfs_spin_unlock(&sbi->ll_lock);
+       spin_lock(&sbi->ll_lock);
+       pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
+       spin_unlock(&sbi->ll_lock);
 
-        mult = 1 << (20 - CFS_PAGE_SHIFT);
-        return lprocfs_read_frac_helper(page, count, pages_number, mult);
+       mult = 1 << (20 - CFS_PAGE_SHIFT);
+       return lprocfs_read_frac_helper(page, count, pages_number, mult);
 }
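
Each of these lproc handlers holds ll_lock just long enough to snapshot or replace a single read-ahead limit, so the read-ahead code never observes a torn or half-applied update. Every read/write pair has the same shape; a self-contained sketch with illustrative names:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(ra_lock);        /* analogue of ll_lock */
    static long ra_max_pages;

    static long tunable_read(void)
    {
            long v;

            spin_lock(&ra_lock);
            v = ra_max_pages;               /* snapshot under the lock */
            spin_unlock(&ra_lock);
            return v;
    }

    static void tunable_write(long v)
    {
            spin_lock(&ra_lock);
            ra_max_pages = v;
            spin_unlock(&ra_lock);
    }
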
 
 static int ll_wr_max_readahead_per_file_mb(struct file *file, const char *buffer,
@@ -308,27 +308,27 @@ static int ll_wr_max_readahead_per_file_mb(struct file *file, const char *buffer
                 return -ERANGE;
         }
 
-        cfs_spin_lock(&sbi->ll_lock);
-        sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
-        cfs_spin_unlock(&sbi->ll_lock);
+       spin_lock(&sbi->ll_lock);
+       sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
+       spin_unlock(&sbi->ll_lock);
 
-        return count;
+       return count;
 }
 
 static int ll_rd_max_read_ahead_whole_mb(char *page, char **start, off_t off,
                                          int count, int *eof, void *data)
 {
-        struct super_block *sb = data;
-        struct ll_sb_info *sbi = ll_s2sbi(sb);
-        long pages_number;
-        int mult;
+       struct super_block *sb = data;
+       struct ll_sb_info *sbi = ll_s2sbi(sb);
+       long pages_number;
+       int mult;
 
-        cfs_spin_lock(&sbi->ll_lock);
-        pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
-        cfs_spin_unlock(&sbi->ll_lock);
+       spin_lock(&sbi->ll_lock);
+       pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
+       spin_unlock(&sbi->ll_lock);
 
-        mult = 1 << (20 - CFS_PAGE_SHIFT);
-        return lprocfs_read_frac_helper(page, count, pages_number, mult);
+       mult = 1 << (20 - CFS_PAGE_SHIFT);
+       return lprocfs_read_frac_helper(page, count, pages_number, mult);
 }
 
 static int ll_wr_max_read_ahead_whole_mb(struct file *file, const char *buffer,
@@ -353,11 +353,11 @@ static int ll_wr_max_read_ahead_whole_mb(struct file *file, const char *buffer,
                 return -ERANGE;
         }
 
-        cfs_spin_lock(&sbi->ll_lock);
-        sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
-        cfs_spin_unlock(&sbi->ll_lock);
+       spin_lock(&sbi->ll_lock);
+       sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
+       spin_unlock(&sbi->ll_lock);
 
-        return count;
+       return count;
 }
 
 static int ll_rd_max_cached_mb(char *page, char **start, off_t off,
@@ -413,9 +413,9 @@ static int ll_wr_max_cached_mb(struct file *file, const char *buffer,
        if (sbi->ll_dt_exp == NULL)
                RETURN(-ENODEV);
 
-       cfs_spin_lock(&sbi->ll_lock);
+       spin_lock(&sbi->ll_lock);
        diff = pages_number - cache->ccc_lru_max;
-       cfs_spin_unlock(&sbi->ll_lock);
+       spin_unlock(&sbi->ll_lock);
 
        /* easy - add more LRU slots. */
        if (diff >= 0) {
@@ -459,9 +459,9 @@ static int ll_wr_max_cached_mb(struct file *file, const char *buffer,
 
 out:
        if (rc >= 0) {
-               cfs_spin_lock(&sbi->ll_lock);
+               spin_lock(&sbi->ll_lock);
                cache->ccc_lru_max = pages_number;
-               cfs_spin_unlock(&sbi->ll_lock);
+               spin_unlock(&sbi->ll_lock);
                rc = count;
        } else {
                cfs_atomic_add(nrpages, &cache->ccc_lru_left);
@@ -1052,16 +1052,16 @@ static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
                    "extents", "calls", "%", "cum%",
                    "calls", "%", "cum%");
-        cfs_spin_lock(&sbi->ll_pp_extent_lock);
-        for(k = 0; k < LL_PROCESS_HIST_MAX; k++) {
-                if(io_extents->pp_extents[k].pid != 0) {
-                        seq_printf(seq, "\nPID: %d\n",
-                                   io_extents->pp_extents[k].pid);
-                        ll_display_extents_info(io_extents, seq, k);
-                }
-        }
-        cfs_spin_unlock(&sbi->ll_pp_extent_lock);
-        return 0;
+       spin_lock(&sbi->ll_pp_extent_lock);
+       for (k = 0; k < LL_PROCESS_HIST_MAX; k++) {
+               if (io_extents->pp_extents[k].pid != 0) {
+                       seq_printf(seq, "\nPID: %d\n",
+                                  io_extents->pp_extents[k].pid);
+                       ll_display_extents_info(io_extents, seq, k);
+               }
+       }
+       spin_unlock(&sbi->ll_pp_extent_lock);
+       return 0;
 }
 
 static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
@@ -1084,14 +1084,14 @@ static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
         else
                 sbi->ll_rw_stats_on = 1;
 
-        cfs_spin_lock(&sbi->ll_pp_extent_lock);
-        for(i = 0; i < LL_PROCESS_HIST_MAX; i++) {
-                io_extents->pp_extents[i].pid = 0;
-                lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
-                lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
-        }
-        cfs_spin_unlock(&sbi->ll_pp_extent_lock);
-        return len;
+       spin_lock(&sbi->ll_pp_extent_lock);
+       for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
+               io_extents->pp_extents[i].pid = 0;
+               lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
+               lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
+       }
+       spin_unlock(&sbi->ll_pp_extent_lock);
+       return len;
 }
 
 LPROC_SEQ_FOPS(ll_rw_extents_stats_pp);
@@ -1117,11 +1117,11 @@ static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
         seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
                    "extents", "calls", "%", "cum%",
                    "calls", "%", "cum%");
-        cfs_spin_lock(&sbi->ll_lock);
-        ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
-        cfs_spin_unlock(&sbi->ll_lock);
+       spin_lock(&sbi->ll_lock);
+       ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
+       spin_unlock(&sbi->ll_lock);
 
-        return 0;
+       return 0;
 }
 
 static ssize_t ll_rw_extents_stats_seq_write(struct file *file, const char *buf,
@@ -1142,16 +1142,15 @@ static ssize_t ll_rw_extents_stats_seq_write(struct file *file, const char *buf,
                 sbi->ll_rw_stats_on = 0;
         else
                 sbi->ll_rw_stats_on = 1;
-        cfs_spin_lock(&sbi->ll_pp_extent_lock);
-        for(i = 0; i <= LL_PROCESS_HIST_MAX; i++)
-        {
-                io_extents->pp_extents[i].pid = 0;
-                lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
-                lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
-        }
-        cfs_spin_unlock(&sbi->ll_pp_extent_lock);
+       spin_lock(&sbi->ll_pp_extent_lock);
+       for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
+               io_extents->pp_extents[i].pid = 0;
+               lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
+               lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
+       }
+       spin_unlock(&sbi->ll_pp_extent_lock);
 
-        return len;
+       return len;
 }
 
 LPROC_SEQ_FOPS(ll_rw_extents_stats);
@@ -1172,7 +1171,7 @@ void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
         process = sbi->ll_rw_process_info;
         offset = sbi->ll_rw_offset_info;
 
-        cfs_spin_lock(&sbi->ll_pp_extent_lock);
+       spin_lock(&sbi->ll_pp_extent_lock);
         /* Extent statistics */
         for(i = 0; i < LL_PROCESS_HIST_MAX; i++) {
                 if(io_extents->pp_extents[i].pid == pid) {
@@ -1200,9 +1199,9 @@ void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
                 io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
                 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
         }
-        cfs_spin_unlock(&sbi->ll_pp_extent_lock);
+       spin_unlock(&sbi->ll_pp_extent_lock);
 
-        cfs_spin_lock(&sbi->ll_process_lock);
+       spin_lock(&sbi->ll_process_lock);
         /* Offset statistics */
         for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
                 if (process[i].rw_pid == pid) {
@@ -1213,7 +1212,7 @@ void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
                                 process[i].rw_largest_extent = count;
                                 process[i].rw_offset = 0;
                                 process[i].rw_last_file = file;
-                                cfs_spin_unlock(&sbi->ll_process_lock);
+                               spin_unlock(&sbi->ll_process_lock);
                                 return;
                         }
                         if (process[i].rw_last_file_pos != pos) {
@@ -1243,7 +1242,7 @@ void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
                         if(process[i].rw_largest_extent < count)
                                 process[i].rw_largest_extent = count;
                         process[i].rw_last_file_pos = pos + count;
-                        cfs_spin_unlock(&sbi->ll_process_lock);
+                       spin_unlock(&sbi->ll_process_lock);
                         return;
                 }
         }
@@ -1256,7 +1255,7 @@ void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
         process[*process_count].rw_largest_extent = count;
         process[*process_count].rw_offset = 0;
         process[*process_count].rw_last_file = file;
-        cfs_spin_unlock(&sbi->ll_process_lock);
+       spin_unlock(&sbi->ll_process_lock);
 }
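
ll_rw_stats_tally above releases ll_process_lock on three separate return paths, so each early return must remember the unlock. An equivalent single-exit structuring (an alternative shape, not what this patch does) funnels every path through one unlock; a sketch with illustrative names:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(proc_lock);      /* analogue of ll_process_lock */

    static int find_slot(int pid)
    {
            return -1;                      /* stub: no existing slot found */
    }

    static void stats_tally(int pid)
    {
            spin_lock(&proc_lock);
            if (find_slot(pid) >= 0)
                    goto out;               /* early exits share one unlock */
            /* ... allocate and fill a fresh slot ... */
    out:
            spin_unlock(&proc_lock);
    }
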
 
 static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
@@ -1275,7 +1274,7 @@ static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
                                 "then 0 or \"[D/d]isabled\" to deactivate\n");
                 return 0;
         }
-        cfs_spin_lock(&sbi->ll_process_lock);
+       spin_lock(&sbi->ll_process_lock);
 
         seq_printf(seq, "snapshot_time:         %lu.%lu (secs.usecs)\n",
                    now.tv_sec, now.tv_usec);
@@ -1306,9 +1305,9 @@ static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
                                    (unsigned long)process[i].rw_largest_extent,
                                    process[i].rw_offset);
         }
-        cfs_spin_unlock(&sbi->ll_process_lock);
+       spin_unlock(&sbi->ll_process_lock);
 
-        return 0;
+       return 0;
 }
 
 static ssize_t ll_rw_offset_stats_seq_write(struct file *file, const char *buf,
@@ -1331,16 +1330,16 @@ static ssize_t ll_rw_offset_stats_seq_write(struct file *file, const char *buf,
         else
                 sbi->ll_rw_stats_on = 1;
 
-        cfs_spin_lock(&sbi->ll_process_lock);
-        sbi->ll_offset_process_count = 0;
-        sbi->ll_rw_offset_entry_count = 0;
-        memset(process_info, 0, sizeof(struct ll_rw_process_info) *
-               LL_PROCESS_HIST_MAX);
-        memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
-               LL_OFFSET_HIST_MAX);
-        cfs_spin_unlock(&sbi->ll_process_lock);
+       spin_lock(&sbi->ll_process_lock);
+       sbi->ll_offset_process_count = 0;
+       sbi->ll_rw_offset_entry_count = 0;
+       memset(process_info, 0, sizeof(struct ll_rw_process_info) *
+              LL_PROCESS_HIST_MAX);
+       memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
+              LL_OFFSET_HIST_MAX);
+       spin_unlock(&sbi->ll_process_lock);
 
-        return len;
+       return len;
 }
 
 LPROC_SEQ_FOPS(ll_rw_offset_stats);
index 37471ae..b24a4e6 100644
@@ -132,7 +132,7 @@ static int do_check_remote_perm(struct ll_inode_info *lli, int mask)
 
         head = lli->lli_remote_perms + remote_perm_hashfunc(cfs_curproc_uid());
 
-        cfs_spin_lock(&lli->lli_lock);
+       spin_lock(&lli->lli_lock);
         cfs_hlist_for_each_entry(lrp, node, head, lrp_list) {
                 if (lrp->lrp_uid != cfs_curproc_uid())
                         continue;
@@ -155,8 +155,8 @@ static int do_check_remote_perm(struct ll_inode_info *lli, int mask)
         rc = ((lrp->lrp_access_perm & mask) == mask) ? 0 : -EACCES;
 
 out:
-        cfs_spin_unlock(&lli->lli_lock);
-        return rc;
+       spin_unlock(&lli->lli_lock);
+       return rc;
 }
 
 int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm)
@@ -192,7 +192,7 @@ int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm)
                 }
         }
 
-        cfs_spin_lock(&lli->lli_lock);
+       spin_lock(&lli->lli_lock);
 
         if (!lli->lli_remote_perms)
                 lli->lli_remote_perms = perm_hash;
@@ -217,16 +217,16 @@ again:
                 break;
         }
 
-        if (!lrp) {
-                cfs_spin_unlock(&lli->lli_lock);
-                lrp = alloc_ll_remote_perm();
-                if (!lrp) {
-                        CERROR("alloc memory for ll_remote_perm failed!\n");
-                        RETURN(-ENOMEM);
-                }
-                cfs_spin_lock(&lli->lli_lock);
-                goto again;
-        }
+       if (!lrp) {
+               spin_unlock(&lli->lli_lock);
+               lrp = alloc_ll_remote_perm();
+               if (!lrp) {
+                       CERROR("alloc memory for ll_remote_perm failed!\n");
+                       RETURN(-ENOMEM);
+               }
+               spin_lock(&lli->lli_lock);
+               goto again;
+       }
 
         lrp->lrp_access_perm = perm->rp_access_perm;
         if (lrp != tmp) {
@@ -237,7 +237,7 @@ again:
                 cfs_hlist_add_head(&lrp->lrp_list, head);
         }
         lli->lli_rmtperm_time = cfs_time_current();
-        cfs_spin_unlock(&lli->lli_lock);
+       spin_unlock(&lli->lli_lock);
 
         CDEBUG(D_SEC, "new remote perm@%p: %u/%u/%u/%u - %#x\n",
                lrp, lrp->lrp_uid, lrp->lrp_gid, lrp->lrp_fsuid, lrp->lrp_fsgid,
@@ -265,12 +265,12 @@ int lustre_check_remote_perm(struct inode *inode, int mask)
 
                 cfs_might_sleep();
 
-                cfs_mutex_lock(&lli->lli_rmtperm_mutex);
+               mutex_lock(&lli->lli_rmtperm_mutex);
                 /* check again */
                 if (save != lli->lli_rmtperm_time) {
                         rc = do_check_remote_perm(lli, mask);
                         if (!rc || (rc != -ENOENT && i)) {
-                                cfs_mutex_unlock(&lli->lli_rmtperm_mutex);
+                               mutex_unlock(&lli->lli_rmtperm_mutex);
                                 break;
                         }
                 }
@@ -285,20 +285,20 @@ int lustre_check_remote_perm(struct inode *inode, int mask)
                                         ll_i2suppgid(inode), &req);
                 capa_put(oc);
                 if (rc) {
-                        cfs_mutex_unlock(&lli->lli_rmtperm_mutex);
+                       mutex_unlock(&lli->lli_rmtperm_mutex);
                         break;
                 }
 
                 perm = req_capsule_server_swab_get(&req->rq_pill, &RMF_ACL,
                                                    lustre_swab_mdt_remote_perm);
                 if (unlikely(perm == NULL)) {
-                        cfs_mutex_unlock(&lli->lli_rmtperm_mutex);
+                       mutex_unlock(&lli->lli_rmtperm_mutex);
                         rc = -EPROTO;
                         break;
                 }
 
                 rc = ll_update_remote_perm(inode, perm);
-                cfs_mutex_unlock(&lli->lli_rmtperm_mutex);
+               mutex_unlock(&lli->lli_rmtperm_mutex);
                 if (rc == -ENOMEM)
                         break;
 
@@ -322,14 +322,14 @@ void ll_free_remote_perms(struct inode *inode)
 
         LASSERT(hash);
 
-        cfs_spin_lock(&lli->lli_lock);
+       spin_lock(&lli->lli_lock);
 
-        for (i = 0; i < REMOTE_PERM_HASHSIZE; i++) {
-                cfs_hlist_for_each_entry_safe(lrp, node, next, hash + i,
-                                              lrp_list)
-                        free_ll_remote_perm(lrp);
-        }
+       for (i = 0; i < REMOTE_PERM_HASHSIZE; i++) {
+               cfs_hlist_for_each_entry_safe(lrp, node, next, hash + i,
+                                             lrp_list)
+                       free_ll_remote_perm(lrp);
+       }
 
-        cfs_spin_unlock(&lli->lli_lock);
+       spin_unlock(&lli->lli_lock);
 }
 #endif
index f7749d5..a8ce592 100644
@@ -422,29 +422,29 @@ static struct ll_readahead_state *ll_ras_get(struct file *f)
 
 void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
 {
-        struct ll_readahead_state *ras;
+       struct ll_readahead_state *ras;
 
-        ras = ll_ras_get(f);
+       ras = ll_ras_get(f);
 
-        cfs_spin_lock(&ras->ras_lock);
-        ras->ras_requests++;
-        ras->ras_request_index = 0;
-        ras->ras_consecutive_requests++;
-        rar->lrr_reader = current;
+       spin_lock(&ras->ras_lock);
+       ras->ras_requests++;
+       ras->ras_request_index = 0;
+       ras->ras_consecutive_requests++;
+       rar->lrr_reader = current;
 
-        cfs_list_add(&rar->lrr_linkage, &ras->ras_read_beads);
-        cfs_spin_unlock(&ras->ras_lock);
+       cfs_list_add(&rar->lrr_linkage, &ras->ras_read_beads);
+       spin_unlock(&ras->ras_lock);
 }
 
 void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
 {
-        struct ll_readahead_state *ras;
+       struct ll_readahead_state *ras;
 
-        ras = ll_ras_get(f);
+       ras = ll_ras_get(f);
 
-        cfs_spin_lock(&ras->ras_lock);
-        cfs_list_del_init(&rar->lrr_linkage);
-        cfs_spin_unlock(&ras->ras_lock);
+       spin_lock(&ras->ras_lock);
+       cfs_list_del_init(&rar->lrr_linkage);
+       spin_unlock(&ras->ras_lock);
 }
 
 static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
@@ -460,15 +460,15 @@ static struct ll_ra_read *ll_ra_read_get_locked(struct ll_readahead_state *ras)
 
 struct ll_ra_read *ll_ra_read_get(struct file *f)
 {
-        struct ll_readahead_state *ras;
-        struct ll_ra_read         *bead;
+       struct ll_readahead_state *ras;
+       struct ll_ra_read         *bead;
 
-        ras = ll_ras_get(f);
+       ras = ll_ras_get(f);
 
-        cfs_spin_lock(&ras->ras_lock);
-        bead = ll_ra_read_get_locked(ras);
-        cfs_spin_unlock(&ras->ras_lock);
-        return bead;
+       spin_lock(&ras->ras_lock);
+       bead = ll_ra_read_get_locked(ras);
+       spin_unlock(&ras->ras_lock);
+       return bead;
 }
 
 static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
@@ -742,7 +742,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
                 RETURN(0);
         }
 
-        cfs_spin_lock(&ras->ras_lock);
+       spin_lock(&ras->ras_lock);
         if (vio->cui_ra_window_set)
                 bead = &vio->cui_bead;
         else
@@ -793,7 +793,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
                 ria->ria_length = ras->ras_stride_length;
                 ria->ria_pages = ras->ras_stride_pages;
         }
-        cfs_spin_unlock(&ras->ras_lock);
+       spin_unlock(&ras->ras_lock);
 
         if (end == 0) {
                 ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
@@ -829,18 +829,18 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
         CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu \n",
                ra_end, end, ria->ria_end);
 
-        if (ra_end != end + 1) {
-                cfs_spin_lock(&ras->ras_lock);
-                if (ra_end < ras->ras_next_readahead &&
-                    index_in_window(ra_end, ras->ras_window_start, 0,
-                                    ras->ras_window_len)) {
-                        ras->ras_next_readahead = ra_end;
-                               RAS_CDEBUG(ras);
-                }
-                cfs_spin_unlock(&ras->ras_lock);
-        }
+       if (ra_end != end + 1) {
+               spin_lock(&ras->ras_lock);
+               if (ra_end < ras->ras_next_readahead &&
+                   index_in_window(ra_end, ras->ras_window_start, 0,
+                                   ras->ras_window_len)) {
+                       ras->ras_next_readahead = ra_end;
+                       RAS_CDEBUG(ras);
+               }
+               spin_unlock(&ras->ras_lock);
+       }
 
-        RETURN(ret);
+       RETURN(ret);
 }
 
 static void ras_set_start(struct ll_readahead_state *ras, unsigned long index)
@@ -872,10 +872,10 @@ static void ras_stride_reset(struct ll_readahead_state *ras)
 
 void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
 {
-        cfs_spin_lock_init(&ras->ras_lock);
-        ras_reset(ras, 0);
-        ras->ras_requests = 0;
-        CFS_INIT_LIST_HEAD(&ras->ras_read_beads);
+       spin_lock_init(&ras->ras_lock);
+       ras_reset(ras, 0);
+       ras->ras_requests = 0;
+       CFS_INIT_LIST_HEAD(&ras->ras_read_beads);
 }
 
 /*
@@ -990,14 +990,14 @@ static void ras_increase_window(struct ll_readahead_state *ras,
 }
 
 void ras_update(struct ll_sb_info *sbi, struct inode *inode,
-                struct ll_readahead_state *ras, unsigned long index,
-                unsigned hit)
+               struct ll_readahead_state *ras, unsigned long index,
+               unsigned hit)
 {
-        struct ll_ra_info *ra = &sbi->ll_ra_info;
-        int zero = 0, stride_detect = 0, ra_miss = 0;
-        ENTRY;
+       struct ll_ra_info *ra = &sbi->ll_ra_info;
+       int zero = 0, stride_detect = 0, ra_miss = 0;
+       ENTRY;
 
-        cfs_spin_lock(&ras->ras_lock);
+       spin_lock(&ras->ras_lock);
 
         ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS);
 
@@ -1134,10 +1134,10 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
                 ras_increase_window(ras, ra, inode);
         EXIT;
 out_unlock:
-        RAS_CDEBUG(ras);
-        ras->ras_request_index++;
-        cfs_spin_unlock(&ras->ras_lock);
-        return;
+       RAS_CDEBUG(ras);
+       ras->ras_request_index++;
+       spin_unlock(&ras->ras_lock);
+       return;
 }
 
 int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
index 3ac64c9..c52db14 100644
@@ -115,11 +115,11 @@ static inline int ll_sa_entry_hash(int val)
 static inline void
 ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
 {
-        int i = ll_sa_entry_hash(entry->se_qstr.hash);
+       int i = ll_sa_entry_hash(entry->se_qstr.hash);
 
-        cfs_spin_lock(&sai->sai_cache_lock[i]);
-        cfs_list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
-        cfs_spin_unlock(&sai->sai_cache_lock[i]);
+       spin_lock(&sai->sai_cache_lock[i]);
+       cfs_list_add_tail(&entry->se_hash, &sai->sai_cache[i]);
+       spin_unlock(&sai->sai_cache_lock[i]);
 }
 
 /*
@@ -128,11 +128,11 @@ ll_sa_entry_enhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
 static inline void
 ll_sa_entry_unhash(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
 {
-        int i = ll_sa_entry_hash(entry->se_qstr.hash);
+       int i = ll_sa_entry_hash(entry->se_qstr.hash);
 
-        cfs_spin_lock(&sai->sai_cache_lock[i]);
-        cfs_list_del_init(&entry->se_hash);
-        cfs_spin_unlock(&sai->sai_cache_lock[i]);
+       spin_lock(&sai->sai_cache_lock[i]);
+       cfs_list_del_init(&entry->se_hash);
+       spin_unlock(&sai->sai_cache_lock[i]);
 }
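
The enhash/unhash pair above shows the per-bucket locking pattern this file uses throughout: one spinlock per hash chain, so insertions into different buckets never contend. A small stand-alone sketch of the same idea (all names invented, not from the Lustre tree):

#include <linux/list.h>
#include <linux/spinlock.h>

#define CACHE_SIZE 32                           /* number of hash buckets */

struct entry_cache {
        spinlock_t       ec_lock[CACHE_SIZE];   /* one lock per chain */
        struct list_head ec_chain[CACHE_SIZE];
};

struct cache_entry {
        struct list_head ce_hash;
        unsigned int     ce_key;
};

static inline int cache_hash(unsigned int key)
{
        return key % CACHE_SIZE;
}

static void cache_add(struct entry_cache *ec, struct cache_entry *ce)
{
        int i = cache_hash(ce->ce_key);

        spin_lock(&ec->ec_lock[i]);
        list_add_tail(&ce->ce_hash, &ec->ec_chain[i]);
        spin_unlock(&ec->ec_lock[i]);
}

static void cache_del(struct entry_cache *ec, struct cache_entry *ce)
{
        int i = cache_hash(ce->ce_key);

        spin_lock(&ec->ec_lock[i]);
        list_del_init(&ce->ce_hash);
        spin_unlock(&ec->ec_lock[i]);
}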
 
 static inline int agl_should_run(struct ll_statahead_info *sai,
@@ -249,14 +249,14 @@ ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index,
         entry->se_qstr.name = dname;
 
         lli = ll_i2info(sai->sai_inode);
-        cfs_spin_lock(&lli->lli_sa_lock);
-        cfs_list_add_tail(&entry->se_list, &sai->sai_entries_sent);
-        cfs_spin_unlock(&lli->lli_sa_lock);
+       spin_lock(&lli->lli_sa_lock);
+       cfs_list_add_tail(&entry->se_list, &sai->sai_entries_sent);
+       spin_unlock(&lli->lli_sa_lock);
 
-        cfs_atomic_inc(&sai->sai_cache_count);
-        ll_sa_entry_enhash(sai, entry);
+       cfs_atomic_inc(&sai->sai_cache_count);
+       ll_sa_entry_enhash(sai, entry);
 
-        RETURN(entry);
+       RETURN(entry);
 }
 
 /*
@@ -346,17 +346,17 @@ static void ll_sa_entry_put(struct ll_statahead_info *sai,
 static inline void
 do_sai_entry_fini(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
 {
-        struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
+       struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
 
-        ll_sa_entry_unhash(sai, entry);
+       ll_sa_entry_unhash(sai, entry);
 
-        cfs_spin_lock(&lli->lli_sa_lock);
-        entry->se_stat = SA_ENTRY_DEST;
-        if (likely(!ll_sa_entry_unlinked(entry)))
-                cfs_list_del_init(&entry->se_list);
-        cfs_spin_unlock(&lli->lli_sa_lock);
+       spin_lock(&lli->lli_sa_lock);
+       entry->se_stat = SA_ENTRY_DEST;
+       if (likely(!ll_sa_entry_unlinked(entry)))
+               cfs_list_del_init(&entry->se_list);
+       spin_unlock(&lli->lli_sa_lock);
 
-        ll_sa_entry_put(sai, entry);
+       ll_sa_entry_put(sai, entry);
 }
 
 /*
@@ -420,21 +420,21 @@ do_sai_entry_to_stated(struct ll_statahead_info *sai,
  */
 static int
 ll_sa_entry_to_stated(struct ll_statahead_info *sai,
-                       struct ll_sa_entry *entry, int rc)
+                     struct ll_sa_entry *entry, int rc)
 {
-        struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
-        int                   ret = 1;
+       struct ll_inode_info *lli = ll_i2info(sai->sai_inode);
+       int                   ret = 1;
 
-        ll_sa_entry_cleanup(sai, entry);
+       ll_sa_entry_cleanup(sai, entry);
 
-        cfs_spin_lock(&lli->lli_sa_lock);
-        if (likely(entry->se_stat != SA_ENTRY_DEST)) {
-                do_sai_entry_to_stated(sai, entry, rc);
-                ret = 0;
-        }
-        cfs_spin_unlock(&lli->lli_sa_lock);
+       spin_lock(&lli->lli_sa_lock);
+       if (likely(entry->se_stat != SA_ENTRY_DEST)) {
+               do_sai_entry_to_stated(sai, entry, rc);
+               ret = 0;
+       }
+       spin_unlock(&lli->lli_sa_lock);
 
-        return ret;
+       return ret;
 }
 
 /*
@@ -443,29 +443,29 @@ ll_sa_entry_to_stated(struct ll_statahead_info *sai,
 static void ll_agl_add(struct ll_statahead_info *sai,
                        struct inode *inode, int index)
 {
-        struct ll_inode_info *child  = ll_i2info(inode);
-        struct ll_inode_info *parent = ll_i2info(sai->sai_inode);
-        int                   added  = 0;
-
-        cfs_spin_lock(&child->lli_agl_lock);
-        if (child->lli_agl_index == 0) {
-                child->lli_agl_index = index;
-                cfs_spin_unlock(&child->lli_agl_lock);
-
-                LASSERT(cfs_list_empty(&child->lli_agl_list));
-
-                igrab(inode);
-                cfs_spin_lock(&parent->lli_agl_lock);
-                if (agl_list_empty(sai))
-                        added = 1;
-                cfs_list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
-                cfs_spin_unlock(&parent->lli_agl_lock);
-        } else {
-                cfs_spin_unlock(&child->lli_agl_lock);
-        }
-
-        if (added > 0)
-                cfs_waitq_signal(&sai->sai_agl_thread.t_ctl_waitq);
+       struct ll_inode_info *child  = ll_i2info(inode);
+       struct ll_inode_info *parent = ll_i2info(sai->sai_inode);
+       int                   added  = 0;
+
+       spin_lock(&child->lli_agl_lock);
+       if (child->lli_agl_index == 0) {
+               child->lli_agl_index = index;
+               spin_unlock(&child->lli_agl_lock);
+
+               LASSERT(cfs_list_empty(&child->lli_agl_list));
+
+               igrab(inode);
+               spin_lock(&parent->lli_agl_lock);
+               if (agl_list_empty(sai))
+                       added = 1;
+               cfs_list_add_tail(&child->lli_agl_list, &sai->sai_entries_agl);
+               spin_unlock(&parent->lli_agl_lock);
+       } else {
+               spin_unlock(&child->lli_agl_lock);
+       }
+
+       if (added > 0)
+               cfs_waitq_signal(&sai->sai_agl_thread.t_ctl_waitq);
 }
 
 static struct ll_statahead_info *ll_sai_alloc(void)
@@ -480,11 +480,11 @@ static struct ll_statahead_info *ll_sai_alloc(void)
 
         cfs_atomic_set(&sai->sai_refcount, 1);
 
-        cfs_spin_lock(&sai_generation_lock);
-        sai->sai_generation = ++sai_generation;
-        if (unlikely(sai_generation == 0))
-                sai->sai_generation = ++sai_generation;
-        cfs_spin_unlock(&sai_generation_lock);
+       spin_lock(&sai_generation_lock);
+       sai->sai_generation = ++sai_generation;
+       if (unlikely(sai_generation == 0))
+               sai->sai_generation = ++sai_generation;
+       spin_unlock(&sai_generation_lock);
 
         sai->sai_max = LL_SA_RPC_MIN;
         sai->sai_index = 1;
@@ -499,7 +499,7 @@ static struct ll_statahead_info *ll_sai_alloc(void)
 
         for (i = 0; i < LL_SA_CACHE_SIZE; i++) {
                 CFS_INIT_LIST_HEAD(&sai->sai_cache[i]);
-                cfs_spin_lock_init(&sai->sai_cache_lock[i]);
+               spin_lock_init(&sai->sai_cache_lock[i]);
         }
         cfs_atomic_set(&sai->sai_cache_count, 0);
 
@@ -525,17 +525,17 @@ static void ll_sai_put(struct ll_statahead_info *sai)
                 if (unlikely(cfs_atomic_read(&sai->sai_refcount) > 0)) {
                        /* It is a race case; the interpret callback just
                         * holds a reference count */
-                        cfs_spin_unlock(&lli->lli_sa_lock);
-                        RETURN_EXIT;
-                }
+                       spin_unlock(&lli->lli_sa_lock);
+                       RETURN_EXIT;
+               }
 
-                LASSERT(lli->lli_opendir_key == NULL);
-                LASSERT(thread_is_stopped(&sai->sai_thread));
-                LASSERT(thread_is_stopped(&sai->sai_agl_thread));
+               LASSERT(lli->lli_opendir_key == NULL);
+               LASSERT(thread_is_stopped(&sai->sai_thread));
+               LASSERT(thread_is_stopped(&sai->sai_agl_thread));
 
-                lli->lli_sai = NULL;
-                lli->lli_opendir_pid = 0;
-                cfs_spin_unlock(&lli->lli_sa_lock);
+               lli->lli_sai = NULL;
+               lli->lli_opendir_pid = 0;
+               spin_unlock(&lli->lli_sa_lock);
 
                 if (sai->sai_sent > sai->sai_replied)
                         CDEBUG(D_READA,"statahead for dir "DFID" does not "
@@ -581,7 +581,7 @@ static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
         }
 
         /* Someone is in glimpse (sync or async), do nothing. */
-        rc = cfs_down_write_trylock(&lli->lli_glimpse_sem);
+       rc = down_write_trylock(&lli->lli_glimpse_sem);
         if (rc == 0) {
                 lli->lli_agl_index = 0;
                 iput(inode);
@@ -603,7 +603,7 @@ static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
          */
         if (lli->lli_glimpse_time != 0 &&
             cfs_time_before(cfs_time_shift(-1), lli->lli_glimpse_time)) {
-                cfs_up_write(&lli->lli_glimpse_sem);
+               up_write(&lli->lli_glimpse_sem);
                 lli->lli_agl_index = 0;
                 iput(inode);
                 RETURN_EXIT;
@@ -615,7 +615,7 @@ static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
         cl_agl(inode);
         lli->lli_agl_index = 0;
         lli->lli_glimpse_time = cfs_time_current();
-        cfs_up_write(&lli->lli_glimpse_sem);
+       up_write(&lli->lli_glimpse_sem);
 
         CDEBUG(D_READA, "Handled (init) async glimpse: inode= "
                DFID", idx = "LPU64", rc = %d\n",
@@ -640,20 +640,20 @@ static void do_statahead_interpret(struct ll_statahead_info *sai,
         int                     rc    = 0;
         ENTRY;
 
-        cfs_spin_lock(&lli->lli_sa_lock);
-        if (target != NULL && target->se_req != NULL &&
-            !cfs_list_empty(&target->se_list)) {
-                entry = target;
-        } else if (unlikely(sa_received_empty(sai))) {
-                cfs_spin_unlock(&lli->lli_sa_lock);
-                RETURN_EXIT;
-        } else {
-                entry = sa_first_received_entry(sai);
-        }
-
-        cfs_atomic_inc(&entry->se_refcount);
-        cfs_list_del_init(&entry->se_list);
-        cfs_spin_unlock(&lli->lli_sa_lock);
+       spin_lock(&lli->lli_sa_lock);
+       if (target != NULL && target->se_req != NULL &&
+           !cfs_list_empty(&target->se_list)) {
+               entry = target;
+       } else if (unlikely(sa_received_empty(sai))) {
+               spin_unlock(&lli->lli_sa_lock);
+               RETURN_EXIT;
+       } else {
+               entry = sa_first_received_entry(sai);
+       }
+
+       cfs_atomic_inc(&entry->se_refcount);
+       cfs_list_del_init(&entry->se_list);
+       spin_unlock(&lli->lli_sa_lock);
 
         LASSERT(entry->se_handle != 0);
 
@@ -732,32 +732,32 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
         if (it_disposition(it, DISP_LOOKUP_NEG))
                 rc = -ENOENT;
 
-        cfs_spin_lock(&lli->lli_sa_lock);
-        /* stale entry */
-        if (unlikely(lli->lli_sai == NULL ||
-                     lli->lli_sai->sai_generation != minfo->mi_generation)) {
-                cfs_spin_unlock(&lli->lli_sa_lock);
-                GOTO(out, rc = -ESTALE);
-        } else {
-                sai = ll_sai_get(lli->lli_sai);
-                if (unlikely(!thread_is_running(&sai->sai_thread))) {
-                        sai->sai_replied++;
-                        cfs_spin_unlock(&lli->lli_sa_lock);
-                        GOTO(out, rc = -EBADFD);
-                }
-
-                entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata);
-                if (entry == NULL) {
-                        sai->sai_replied++;
-                        cfs_spin_unlock(&lli->lli_sa_lock);
-                        GOTO(out, rc = -EIDRM);
-                }
-
-                cfs_list_del_init(&entry->se_list);
-                if (rc != 0) {
-                        sai->sai_replied++;
-                        do_sai_entry_to_stated(sai, entry, rc);
-                        cfs_spin_unlock(&lli->lli_sa_lock);
+       spin_lock(&lli->lli_sa_lock);
+       /* stale entry */
+       if (unlikely(lli->lli_sai == NULL ||
+                    lli->lli_sai->sai_generation != minfo->mi_generation)) {
+               spin_unlock(&lli->lli_sa_lock);
+               GOTO(out, rc = -ESTALE);
+       } else {
+               sai = ll_sai_get(lli->lli_sai);
+               if (unlikely(!thread_is_running(&sai->sai_thread))) {
+                       sai->sai_replied++;
+                       spin_unlock(&lli->lli_sa_lock);
+                       GOTO(out, rc = -EBADFD);
+               }
+
+               entry = ll_sa_entry_get_byindex(sai, minfo->mi_cbdata);
+               if (entry == NULL) {
+                       sai->sai_replied++;
+                       spin_unlock(&lli->lli_sa_lock);
+                       GOTO(out, rc = -EIDRM);
+               }
+
+               cfs_list_del_init(&entry->se_list);
+               if (rc != 0) {
+                       sai->sai_replied++;
+                       do_sai_entry_to_stated(sai, entry, rc);
+                       spin_unlock(&lli->lli_sa_lock);
                         if (entry->se_index == sai->sai_index_wait)
                                 cfs_waitq_signal(&sai->sai_waitq);
                 } else {
@@ -773,7 +773,7 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
                         cfs_list_add_tail(&entry->se_list,
                                           &sai->sai_entries_received);
                         sai->sai_replied++;
-                        cfs_spin_unlock(&lli->lli_sa_lock);
+                       spin_unlock(&lli->lli_sa_lock);
                         if (wakeup)
                                 cfs_waitq_signal(&sai->sai_thread.t_ctl_waitq);
                 }
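
One detail worth noticing in ll_statahead_interpret() above: the waitq signal fires after spin_unlock(), not inside the critical section, which keeps the lock hold time short and avoids waking a thread only for it to block on the same lock. A generic sketch using the plain kernel wait-queue API (the cfs_waitq_* wrappers themselves are untouched by this patch; all names below are invented):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct reply_queue {
        spinlock_t        rq_lock;
        struct list_head  rq_replies;
        wait_queue_head_t rq_waitq;
};

static void reply_queue_add(struct reply_queue *rq, struct list_head *item)
{
        int wakeup;

        spin_lock(&rq->rq_lock);
        wakeup = list_empty(&rq->rq_replies);   /* first reply queued? */
        list_add_tail(item, &rq->rq_replies);
        spin_unlock(&rq->rq_lock);

        /* Signal outside the critical section. */
        if (wakeup)
                wake_up(&rq->rq_waitq);
}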
@@ -1008,10 +1008,10 @@ static int ll_agl_thread(void *arg)
                cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
 
         atomic_inc(&sbi->ll_agl_total);
-        cfs_spin_lock(&plli->lli_agl_lock);
-        sai->sai_agl_valid = 1;
-        thread_set_flags(thread, SVC_RUNNING);
-        cfs_spin_unlock(&plli->lli_agl_lock);
+       spin_lock(&plli->lli_agl_lock);
+       sai->sai_agl_valid = 1;
+       thread_set_flags(thread, SVC_RUNNING);
+       spin_unlock(&plli->lli_agl_lock);
         cfs_waitq_signal(&thread->t_ctl_waitq);
 
         while (1) {
@@ -1023,36 +1023,36 @@ static int ll_agl_thread(void *arg)
                 if (!thread_is_running(thread))
                         break;
 
-                cfs_spin_lock(&plli->lli_agl_lock);
-                /* The statahead thread may help to process AGL entries,
-                 * so check whether the list is empty again. */
-                if (!agl_list_empty(sai)) {
-                        clli = agl_first_entry(sai);
-                        cfs_list_del_init(&clli->lli_agl_list);
-                        cfs_spin_unlock(&plli->lli_agl_lock);
-                        ll_agl_trigger(&clli->lli_vfs_inode, sai);
-                } else {
-                        cfs_spin_unlock(&plli->lli_agl_lock);
-                }
-        }
-
-        cfs_spin_lock(&plli->lli_agl_lock);
-        sai->sai_agl_valid = 0;
-        while (!agl_list_empty(sai)) {
-                clli = agl_first_entry(sai);
-                cfs_list_del_init(&clli->lli_agl_list);
-                cfs_spin_unlock(&plli->lli_agl_lock);
-                clli->lli_agl_index = 0;
-                iput(&clli->lli_vfs_inode);
-                cfs_spin_lock(&plli->lli_agl_lock);
-        }
-        thread_set_flags(thread, SVC_STOPPED);
-        cfs_spin_unlock(&plli->lli_agl_lock);
-        cfs_waitq_signal(&thread->t_ctl_waitq);
-        ll_sai_put(sai);
-        CDEBUG(D_READA, "agl thread stopped: [pid %d] [parent %.*s]\n",
-               cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
-        RETURN(0);
+               spin_lock(&plli->lli_agl_lock);
+               /* The statahead thread may help to process AGL entries,
+                * so check whether the list is empty again. */
+               if (!agl_list_empty(sai)) {
+                       clli = agl_first_entry(sai);
+                       cfs_list_del_init(&clli->lli_agl_list);
+                       spin_unlock(&plli->lli_agl_lock);
+                       ll_agl_trigger(&clli->lli_vfs_inode, sai);
+               } else {
+                       spin_unlock(&plli->lli_agl_lock);
+               }
+       }
+
+       spin_lock(&plli->lli_agl_lock);
+       sai->sai_agl_valid = 0;
+       while (!agl_list_empty(sai)) {
+               clli = agl_first_entry(sai);
+               cfs_list_del_init(&clli->lli_agl_list);
+               spin_unlock(&plli->lli_agl_lock);
+               clli->lli_agl_index = 0;
+               iput(&clli->lli_vfs_inode);
+               spin_lock(&plli->lli_agl_lock);
+       }
+       thread_set_flags(thread, SVC_STOPPED);
+       spin_unlock(&plli->lli_agl_lock);
+       cfs_waitq_signal(&thread->t_ctl_waitq);
+       ll_sai_put(sai);
+       CDEBUG(D_READA, "agl thread stopped: [pid %d] [parent %.*s]\n",
+              cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
+       RETURN(0);
 }
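
The shutdown loop in ll_agl_thread() above drains a spinlock-protected list one entry at a time, dropping the lock around each trigger/iput() because those operations may sleep. A generic sketch of that drain pattern (invented names):

#include <linux/list.h>
#include <linux/spinlock.h>

struct work_item {
        struct list_head wi_list;
};

static void drain_list(spinlock_t *lock, struct list_head *head,
                       void (*process)(struct work_item *wi))
{
        struct work_item *wi;

        spin_lock(lock);
        while (!list_empty(head)) {
                wi = list_first_entry(head, struct work_item, wi_list);
                list_del_init(&wi->wi_list);
                /* Drop the lock while processing: process() may sleep,
                 * and a spinlock must only protect short, non-sleeping
                 * sections. */
                spin_unlock(lock);
                process(wi);
                spin_lock(lock);
        }
        spin_unlock(lock);
}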
 
 static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
@@ -1109,12 +1109,12 @@ static int ll_statahead_thread(void *arg)
                 ll_start_agl(parent, sai);
 
         atomic_inc(&sbi->ll_sa_total);
-        cfs_spin_lock(&plli->lli_sa_lock);
-        thread_set_flags(thread, SVC_RUNNING);
-        cfs_spin_unlock(&plli->lli_sa_lock);
-        cfs_waitq_signal(&thread->t_ctl_waitq);
+       spin_lock(&plli->lli_sa_lock);
+       thread_set_flags(thread, SVC_RUNNING);
+       spin_unlock(&plli->lli_sa_lock);
+       cfs_waitq_signal(&thread->t_ctl_waitq);
 
-        ll_dir_chain_init(&chain);
+       ll_dir_chain_init(&chain);
        page = ll_get_dir_page(dir, pos, &chain);
 
         while (1) {
@@ -1199,11 +1199,11 @@ interpret_it:
                          * some AGL entries to be triggered, then try to help
                          * to process the AGL entries. */
                         if (sa_sent_full(sai)) {
-                                cfs_spin_lock(&plli->lli_agl_lock);
-                                while (!agl_list_empty(sai)) {
-                                        clli = agl_first_entry(sai);
-                                        cfs_list_del_init(&clli->lli_agl_list);
-                                        cfs_spin_unlock(&plli->lli_agl_lock);
+                               spin_lock(&plli->lli_agl_lock);
+                               while (!agl_list_empty(sai)) {
+                                       clli = agl_first_entry(sai);
+                                       cfs_list_del_init(&clli->lli_agl_list);
+                                       spin_unlock(&plli->lli_agl_lock);
                                         ll_agl_trigger(&clli->lli_vfs_inode,
                                                        sai);
 
@@ -1219,9 +1219,9 @@ interpret_it:
                                         if (!sa_sent_full(sai))
                                                 goto do_it;
 
-                                        cfs_spin_lock(&plli->lli_agl_lock);
-                                }
-                                cfs_spin_unlock(&plli->lli_agl_lock);
+                                       spin_lock(&plli->lli_agl_lock);
+                               }
+                               spin_unlock(&plli->lli_agl_lock);
 
                                 goto keep_it;
                         }
@@ -1253,16 +1253,16 @@ do_it:
                                         break;
                         }
 
-                        cfs_spin_lock(&plli->lli_agl_lock);
-                        while (!agl_list_empty(sai) &&
-                               thread_is_running(thread)) {
-                                clli = agl_first_entry(sai);
-                                cfs_list_del_init(&clli->lli_agl_list);
-                                cfs_spin_unlock(&plli->lli_agl_lock);
-                                ll_agl_trigger(&clli->lli_vfs_inode, sai);
-                                cfs_spin_lock(&plli->lli_agl_lock);
-                        }
-                        cfs_spin_unlock(&plli->lli_agl_lock);
+                       spin_lock(&plli->lli_agl_lock);
+                       while (!agl_list_empty(sai) &&
+                              thread_is_running(thread)) {
+                               clli = agl_first_entry(sai);
+                               cfs_list_del_init(&clli->lli_agl_list);
+                               spin_unlock(&plli->lli_agl_lock);
+                               ll_agl_trigger(&clli->lli_vfs_inode, sai);
+                               spin_lock(&plli->lli_agl_lock);
+                       }
+                       spin_unlock(&plli->lli_agl_lock);
 
                         GOTO(out, rc = 0);
                 } else if (1) {
@@ -1287,9 +1287,9 @@ do_it:
 
 out:
         if (sai->sai_agl_valid) {
-                cfs_spin_lock(&plli->lli_agl_lock);
-                thread_set_flags(agl_thread, SVC_STOPPING);
-                cfs_spin_unlock(&plli->lli_agl_lock);
+               spin_lock(&plli->lli_agl_lock);
+               thread_set_flags(agl_thread, SVC_STOPPING);
+               spin_unlock(&plli->lli_agl_lock);
                 cfs_waitq_signal(&agl_thread->t_ctl_waitq);
 
                 CDEBUG(D_READA, "stop agl thread: [pid %d]\n",
@@ -1302,19 +1302,19 @@ out:
                 thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
         }
         ll_dir_chain_fini(&chain);
-        cfs_spin_lock(&plli->lli_sa_lock);
-        if (!sa_received_empty(sai)) {
-                thread_set_flags(thread, SVC_STOPPING);
-                cfs_spin_unlock(&plli->lli_sa_lock);
-
-                /* To release the resources held by received entries. */
-                while (!sa_received_empty(sai))
-                        do_statahead_interpret(sai, NULL);
-
-                cfs_spin_lock(&plli->lli_sa_lock);
-        }
-        thread_set_flags(thread, SVC_STOPPED);
-        cfs_spin_unlock(&plli->lli_sa_lock);
+       spin_lock(&plli->lli_sa_lock);
+       if (!sa_received_empty(sai)) {
+               thread_set_flags(thread, SVC_STOPPING);
+               spin_unlock(&plli->lli_sa_lock);
+
+               /* To release the resources held by received entries. */
+               while (!sa_received_empty(sai))
+                       do_statahead_interpret(sai, NULL);
+
+               spin_lock(&plli->lli_sa_lock);
+       }
+       thread_set_flags(thread, SVC_STOPPED);
+       spin_unlock(&plli->lli_sa_lock);
         cfs_waitq_signal(&sai->sai_waitq);
         cfs_waitq_signal(&thread->t_ctl_waitq);
         ll_sai_put(sai);
@@ -1329,14 +1329,14 @@ out:
  */
 void ll_stop_statahead(struct inode *dir, void *key)
 {
-        struct ll_inode_info *lli = ll_i2info(dir);
+       struct ll_inode_info *lli = ll_i2info(dir);
 
-        if (unlikely(key == NULL))
-                return;
+       if (unlikely(key == NULL))
+               return;
 
-        cfs_spin_lock(&lli->lli_sa_lock);
-        if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
-                cfs_spin_unlock(&lli->lli_sa_lock);
+       spin_lock(&lli->lli_sa_lock);
+       if (lli->lli_opendir_key != key || lli->lli_opendir_pid == 0) {
+               spin_unlock(&lli->lli_sa_lock);
                 return;
         }
 
@@ -1348,28 +1348,28 @@ void ll_stop_statahead(struct inode *dir, void *key)
 
                 if (!thread_is_stopped(thread)) {
                         thread_set_flags(thread, SVC_STOPPING);
-                        cfs_spin_unlock(&lli->lli_sa_lock);
-                        cfs_waitq_signal(&thread->t_ctl_waitq);
-
-                        CDEBUG(D_READA, "stop statahead thread: [pid %d]\n",
-                               cfs_curproc_pid());
-                        l_wait_event(thread->t_ctl_waitq,
-                                     thread_is_stopped(thread),
-                                     &lwi);
-                } else {
-                        cfs_spin_unlock(&lli->lli_sa_lock);
-                }
-
-                /*
-                 * Put the ref which was held when statahead_enter was
-                 * first called.  It may not be the last ref, since some
-                 * statahead requests may still be in flight.
-                 */
-                ll_sai_put(lli->lli_sai);
-        } else {
-                lli->lli_opendir_pid = 0;
-                cfs_spin_unlock(&lli->lli_sa_lock);
-        }
+                       spin_unlock(&lli->lli_sa_lock);
+                       cfs_waitq_signal(&thread->t_ctl_waitq);
+
+                       CDEBUG(D_READA, "stop statahead thread: [pid %d]\n",
+                              cfs_curproc_pid());
+                       l_wait_event(thread->t_ctl_waitq,
+                                    thread_is_stopped(thread),
+                                    &lwi);
+               } else {
+                       spin_unlock(&lli->lli_sa_lock);
+               }
+
+               /*
+                * Put the ref which was held when statahead_enter was
+                * first called.  It may not be the last ref, since some
+                * statahead requests may still be in flight.
+                */
+               ll_sai_put(lli->lli_sai);
+       } else {
+               lli->lli_opendir_pid = 0;
+               spin_unlock(&lli->lli_sa_lock);
+       }
 }
 
 enum {
@@ -1533,17 +1533,17 @@ ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
                                PFID(&lli->lli_fid), sai->sai_hit,
                                sai->sai_miss, sai->sai_sent,
                                sai->sai_replied, cfs_curproc_pid());
-                        cfs_spin_lock(&lli->lli_sa_lock);
-                        if (!thread_is_stopped(thread))
-                                thread_set_flags(thread, SVC_STOPPING);
-                        cfs_spin_unlock(&lli->lli_sa_lock);
-                }
-        }
+                       spin_lock(&lli->lli_sa_lock);
+                       if (!thread_is_stopped(thread))
+                               thread_set_flags(thread, SVC_STOPPING);
+                       spin_unlock(&lli->lli_sa_lock);
+               }
+       }
 
-        if (!thread_is_stopped(thread))
-                cfs_waitq_signal(&thread->t_ctl_waitq);
+       if (!thread_is_stopped(thread))
+               cfs_waitq_signal(&thread->t_ctl_waitq);
 
-        EXIT;
+       EXIT;
 }
 
 /**
@@ -1735,9 +1735,9 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
 out:
         if (sai != NULL)
                 OBD_FREE_PTR(sai);
-        cfs_spin_lock(&lli->lli_sa_lock);
-        lli->lli_opendir_key = NULL;
-        lli->lli_opendir_pid = 0;
-        cfs_spin_unlock(&lli->lli_sa_lock);
-        return rc;
+       spin_lock(&lli->lli_sa_lock);
+       lli->lli_opendir_key = NULL;
+       lli->lli_opendir_pid = 0;
+       spin_unlock(&lli->lli_sa_lock);
+       return rc;
 }
index b203de3..b7deff2 100644 (file)
@@ -372,7 +372,7 @@ static loff_t vvp_pgcache_find(const struct lu_env *env,
                         /* got an object. Find next page. */
                         hdr = cl_object_header(clob);
 
-                        cfs_spin_lock(&hdr->coh_page_guard);
+                       spin_lock(&hdr->coh_page_guard);
                         nr = radix_tree_gang_lookup(&hdr->coh_tree,
                                                     (void **)&pg,
                                                     id.vpi_index, 1);
@@ -381,7 +381,7 @@ static loff_t vvp_pgcache_find(const struct lu_env *env,
                                /* Can't support files over 16TB */
                                 nr = !(pg->cp_index > 0xffffffff);
                         }
-                        cfs_spin_unlock(&hdr->coh_page_guard);
+                       spin_unlock(&hdr->coh_page_guard);
 
                         lu_object_ref_del(&clob->co_lu, "dump", cfs_current());
                         cl_object_put(env, clob);
@@ -398,7 +398,7 @@ static loff_t vvp_pgcache_find(const struct lu_env *env,
 }
 
 #define seq_page_flag(seq, page, flag, has_flags) do {                  \
-        if (cfs_test_bit(PG_##flag, &(page)->flags)) {                  \
+       if (test_bit(PG_##flag, &(page)->flags)) {                  \
                 seq_printf(seq, "%s"#flag, has_flags ? "|" : "");       \
                 has_flags = 1;                                          \
         }                                                               \
@@ -455,9 +455,9 @@ static int vvp_pgcache_show(struct seq_file *f, void *v)
                 if (clob != NULL) {
                         hdr = cl_object_header(clob);
 
-                        cfs_spin_lock(&hdr->coh_page_guard);
-                        page = cl_page_lookup(hdr, id.vpi_index);
-                        cfs_spin_unlock(&hdr->coh_page_guard);
+                       spin_lock(&hdr->coh_page_guard);
+                       page = cl_page_lookup(hdr, id.vpi_index);
+                       spin_unlock(&hdr->coh_page_guard);
 
                         seq_printf(f, "%8x@"DFID": ",
                                    id.vpi_index, PFID(&hdr->coh_lu.loh_fid));
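
The seq_page_flag() change above is one of the few bit-operation hunks in this section: cfs_test_bit() becomes the generic test_bit() from <linux/bitops.h>. A tiny sketch of that family, with made-up flag names:

#include <linux/bitops.h>

#define MY_FLAG_DIRTY   0
#define MY_FLAG_LOCKED  1

static void bitops_demo(unsigned long *flags)
{
        set_bit(MY_FLAG_DIRTY, flags);          /* atomic set */
        if (test_bit(MY_FLAG_DIRTY, flags))     /* read a single bit */
                clear_bit(MY_FLAG_DIRTY, flags);/* atomic clear */

        /* test_and_set_bit() returns the previous value, so it can
         * serve as a simple try-lock on a flag word. */
        if (!test_and_set_bit(MY_FLAG_LOCKED, flags)) {
                /* we own the flag; do work, then release it */
                clear_bit(MY_FLAG_LOCKED, flags);
        }
}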
index 48b3fcb..479cbb4 100644 (file)
@@ -336,9 +336,9 @@ int ll_getxattr_common(struct inode *inode, const char *name,
                 struct ll_inode_info *lli = ll_i2info(inode);
                 struct posix_acl *acl;
 
-                cfs_spin_lock(&lli->lli_lock);
-                acl = posix_acl_dup(lli->lli_posix_acl);
-                cfs_spin_unlock(&lli->lli_lock);
+               spin_lock(&lli->lli_lock);
+               acl = posix_acl_dup(lli->lli_posix_acl);
+               spin_unlock(&lli->lli_lock);
 
                 if (!acl)
                         RETURN(-ENODATA);
index d984220..981e271 100644 (file)
@@ -42,8 +42,8 @@
 
 #define LMV_MAX_TGT_COUNT 128
 
-#define lmv_init_lock(lmv)   cfs_mutex_lock(&lmv->init_mutex);
-#define lmv_init_unlock(lmv) cfs_mutex_unlock(&lmv->init_mutex);
+#define lmv_init_lock(lmv)   mutex_lock(&lmv->init_mutex);
+#define lmv_init_unlock(lmv) mutex_unlock(&lmv->init_mutex);
 
 #define LL_IT2STR(it)                                  \
        ((it) ? ldlm_it2str((it)->it_op) : "0")
@@ -77,7 +77,7 @@ struct lmv_object {
         /**
          * Sema for protecting fields.
          */
-        cfs_mutex_t             lo_guard;
+       struct mutex            lo_guard;
         /**
          * Object state like O_FREEING.
          */
@@ -115,14 +115,14 @@ static inline void
 lmv_object_lock(struct lmv_object *obj)
 {
         LASSERT(obj);
-        cfs_mutex_lock(&obj->lo_guard);
+       mutex_lock(&obj->lo_guard);
 }
 
 static inline void
 lmv_object_unlock(struct lmv_object *obj)
 {
         LASSERT(obj);
-        cfs_mutex_unlock(&obj->lo_guard);
+       mutex_unlock(&obj->lo_guard);
 }
 
 void lmv_object_add(struct lmv_object *obj);
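
The lo_guard hunks above are the mutex half of the conversion: cfs_mutex_t becomes the stock struct mutex, set up with mutex_init() and taken with mutex_lock()/mutex_unlock(). A minimal sketch under invented names:

#include <linux/mutex.h>

struct guarded_object {
        struct mutex  go_guard;         /* protects go_state */
        unsigned long go_state;
};

static void object_init(struct guarded_object *obj)
{
        mutex_init(&obj->go_guard);
        obj->go_state = 0;
}

static void object_set_state(struct guarded_object *obj, unsigned long st)
{
        mutex_lock(&obj->go_guard);     /* may sleep, unlike spin_lock() */
        obj->go_state = st;
        mutex_unlock(&obj->go_guard);
}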
index f3e3280..5ed30c8 100644 (file)
@@ -93,7 +93,7 @@ static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid,
         CDEBUG(D_INFO, "Searching in lmv %p for uuid %s (activate=%d)\n",
                lmv, uuid->uuid, activate);
 
-        cfs_spin_lock(&lmv->lmv_lock);
+       spin_lock(&lmv->lmv_lock);
         for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
                 if (tgt->ltd_exp == NULL)
                         continue;
@@ -129,31 +129,31 @@ static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid,
         EXIT;
 
  out_lmv_lock:
-        cfs_spin_unlock(&lmv->lmv_lock);
-        return rc;
+       spin_unlock(&lmv->lmv_lock);
+       return rc;
 }
 
 static int lmv_set_mdc_data(struct lmv_obd *lmv, struct obd_uuid *uuid,
-                            struct obd_connect_data *data)
+                           struct obd_connect_data *data)
 {
-        struct lmv_tgt_desc    *tgt;
-        int                     i;
-        ENTRY;
+       struct lmv_tgt_desc    *tgt;
+       int                     i;
+       ENTRY;
 
-        LASSERT(data != NULL);
+       LASSERT(data != NULL);
 
-        cfs_spin_lock(&lmv->lmv_lock);
-        for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
-                if (tgt->ltd_exp == NULL)
-                        continue;
+       spin_lock(&lmv->lmv_lock);
+       for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
+               if (tgt->ltd_exp == NULL)
+                       continue;
 
-                if (obd_uuid_equals(uuid, &tgt->ltd_uuid)) {
-                        lmv->datas[tgt->ltd_idx] = *data;
-                        break;
-                }
-        }
-        cfs_spin_unlock(&lmv->lmv_lock);
-        RETURN(0);
+               if (obd_uuid_equals(uuid, &tgt->ltd_uuid)) {
+                       lmv->datas[tgt->ltd_idx] = *data;
+                       break;
+               }
+       }
+       spin_unlock(&lmv->lmv_lock);
+       RETURN(0);
 }
 
 struct obd_uuid *lmv_get_uuid(struct obd_export *exp) {
@@ -515,18 +515,18 @@ int lmv_add_target(struct obd_device *obd, struct obd_uuid *tgt_uuid)
                         RETURN(-EINVAL);
                 }
         }
-        cfs_spin_lock(&lmv->lmv_lock);
-        tgt = lmv->tgts + lmv->desc.ld_tgt_count++;
-        tgt->ltd_uuid = *tgt_uuid;
-        cfs_spin_unlock(&lmv->lmv_lock);
-
-        if (lmv->connected) {
-                rc = lmv_connect_mdc(obd, tgt);
-                if (rc) {
-                        cfs_spin_lock(&lmv->lmv_lock);
-                        lmv->desc.ld_tgt_count--;
-                        memset(tgt, 0, sizeof(*tgt));
-                        cfs_spin_unlock(&lmv->lmv_lock);
+       spin_lock(&lmv->lmv_lock);
+       tgt = lmv->tgts + lmv->desc.ld_tgt_count++;
+       tgt->ltd_uuid = *tgt_uuid;
+       spin_unlock(&lmv->lmv_lock);
+
+       if (lmv->connected) {
+               rc = lmv_connect_mdc(obd, tgt);
+               if (rc) {
+                       spin_lock(&lmv->lmv_lock);
+                       lmv->desc.ld_tgt_count--;
+                       memset(tgt, 0, sizeof(*tgt));
+                       spin_unlock(&lmv->lmv_lock);
                 } else {
                         int easize = sizeof(struct lmv_stripe_md) +
                                      lmv->desc.ld_tgt_count *
@@ -981,7 +981,7 @@ int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid,
          * New seq alloc and FLD setup should be atomic. Otherwise we may find
          * on server that seq in new allocated fid is not yet known.
          */
-        cfs_mutex_lock(&tgt->ltd_fid_mutex);
+       mutex_lock(&tgt->ltd_fid_mutex);
 
         if (!tgt->ltd_active)
                 GOTO(out, rc = -ENODEV);
@@ -997,7 +997,7 @@ int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid,
 
         EXIT;
 out:
-        cfs_mutex_unlock(&tgt->ltd_fid_mutex);
+       mutex_unlock(&tgt->ltd_fid_mutex);
         return rc;
 }
 
@@ -1068,7 +1068,7 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
                 RETURN(-ENOMEM);
 
         for (i = 0; i < LMV_MAX_TGT_COUNT; i++) {
-                cfs_mutex_init(&lmv->tgts[i].ltd_fid_mutex);
+               mutex_init(&lmv->tgts[i].ltd_fid_mutex);
                 lmv->tgts[i].ltd_idx = i;
         }
 
@@ -1086,8 +1086,8 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
         lmv->max_easize = 0;
         lmv->lmv_placement = PLACEMENT_CHAR_POLICY;
 
-        cfs_spin_lock_init(&lmv->lmv_lock);
-        cfs_mutex_init(&lmv->init_mutex);
+       spin_lock_init(&lmv->lmv_lock);
+       mutex_init(&lmv->init_mutex);
 
         rc = lmv_object_setup(obd);
         if (rc) {
index f54f7b3..cd96b2b 100644 (file)
@@ -86,7 +86,7 @@ struct lmv_object *lmv_object_alloc(struct obd_device *obd,
         obj->lo_state = 0;
         obj->lo_hashtype = mea->mea_magic;
 
-        cfs_mutex_init(&obj->lo_guard);
+       mutex_init(&obj->lo_guard);
         cfs_atomic_set(&obj->lo_count, 0);
         obj->lo_objcount = mea->mea_count;
 
@@ -146,9 +146,9 @@ static void __lmv_object_add(struct lmv_object *obj)
 
 void lmv_object_add(struct lmv_object *obj)
 {
-        cfs_spin_lock(&obj_list_lock);
-        __lmv_object_add(obj);
-        cfs_spin_unlock(&obj_list_lock);
+       spin_lock(&obj_list_lock);
+       __lmv_object_add(obj);
+       spin_unlock(&obj_list_lock);
 }
 
 static void __lmv_object_del(struct lmv_object *obj)
@@ -159,9 +159,9 @@ static void __lmv_object_del(struct lmv_object *obj)
 
 void lmv_object_del(struct lmv_object *obj)
 {
-        cfs_spin_lock(&obj_list_lock);
-        __lmv_object_del(obj);
-        cfs_spin_unlock(&obj_list_lock);
+       spin_lock(&obj_list_lock);
+       __lmv_object_del(obj);
+       spin_unlock(&obj_list_lock);
 }
 
 static struct lmv_object *__lmv_object_get(struct lmv_object *obj)
@@ -173,10 +173,10 @@ static struct lmv_object *__lmv_object_get(struct lmv_object *obj)
 
 struct lmv_object *lmv_object_get(struct lmv_object *obj)
 {
-        cfs_spin_lock(&obj_list_lock);
-        __lmv_object_get(obj);
-        cfs_spin_unlock(&obj_list_lock);
-        return obj;
+       spin_lock(&obj_list_lock);
+       __lmv_object_get(obj);
+       spin_unlock(&obj_list_lock);
+       return obj;
 }
 
 static void __lmv_object_put(struct lmv_object *obj)
@@ -192,9 +192,9 @@ static void __lmv_object_put(struct lmv_object *obj)
 
 void lmv_object_put(struct lmv_object *obj)
 {
-        cfs_spin_lock(&obj_list_lock);
-        __lmv_object_put(obj);
-        cfs_spin_unlock(&obj_list_lock);
+       spin_lock(&obj_list_lock);
+       __lmv_object_put(obj);
+       spin_unlock(&obj_list_lock);
 }
 
 void lmv_object_put_unlock(struct lmv_object *obj)
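
Note the convention this file keeps through the rename: a double-underscore helper (__lmv_object_add() and friends) assumes the caller already holds obj_list_lock, while the public wrapper takes the lock itself. A compact sketch of that locked/unlocked pairing (invented names):

#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(registry_lock);
static LIST_HEAD(registry);

struct reg_obj {
        struct list_head ro_list;
};

/* Caller must hold registry_lock. */
static void __reg_add(struct reg_obj *obj)
{
        list_add(&obj->ro_list, &registry);
}

/* Locking wrapper for external callers. */
static void reg_add(struct reg_obj *obj)
{
        spin_lock(&registry_lock);
        __reg_add(obj);
        spin_unlock(&registry_lock);
}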
@@ -239,20 +239,20 @@ static struct lmv_object *__lmv_object_find(struct obd_device *obd, const struct
 }
 
 struct lmv_object *lmv_object_find(struct obd_device *obd,
-                                   const struct lu_fid *fid)
+                                  const struct lu_fid *fid)
 {
-        struct lmv_obd          *lmv = &obd->u.lmv;
-        struct lmv_object       *obj = NULL;
-        ENTRY;
-
-        /* For single MDT case, lmv_object list is always empty. */
-        if (lmv->desc.ld_tgt_count > 1) {
-                cfs_spin_lock(&obj_list_lock);
-                obj = __lmv_object_find(obd, fid);
-                cfs_spin_unlock(&obj_list_lock);
-        }
-
-        RETURN(obj);
+       struct lmv_obd          *lmv = &obd->u.lmv;
+       struct lmv_object       *obj = NULL;
+       ENTRY;
+
+       /* For single MDT case, lmv_object list is always empty. */
+       if (lmv->desc.ld_tgt_count > 1) {
+               spin_lock(&obj_list_lock);
+               obj = __lmv_object_find(obd, fid);
+               spin_unlock(&obj_list_lock);
+       }
+
+       RETURN(obj);
 }
 
 struct lmv_object *lmv_object_find_lock(struct obd_device *obd, 
@@ -284,30 +284,29 @@ static struct lmv_object *__lmv_object_create(struct obd_device *obd,
         if (!new)
                 RETURN(NULL);
 
-        /* 
-         * Check if someone created it already while we were dealing with
-         * allocating @obj. 
-         */
-        cfs_spin_lock(&obj_list_lock);
-        obj = __lmv_object_find(obd, fid);
-        if (obj) {
-                /* 
-                 * Someone created it already - put @obj and getting out. 
-                 */
-                cfs_spin_unlock(&obj_list_lock);
-                lmv_object_free(new);
-                RETURN(obj);
-        }
-
-        __lmv_object_add(new);
-        __lmv_object_get(new);
-
-        cfs_spin_unlock(&obj_list_lock);
-
-        CDEBUG(D_INODE, "New obj in lmv cache: "DFID"\n",
-               PFID(fid));
-
-        RETURN(new);
+       /*
+        * Check if someone created it already while we were dealing with
+        * allocating @obj.
+        */
+       spin_lock(&obj_list_lock);
+       obj = __lmv_object_find(obd, fid);
+       if (obj) {
+               /*
+                * Someone created it already - put @obj and get out.
+                */
+               spin_unlock(&obj_list_lock);
+               lmv_object_free(new);
+               RETURN(obj);
+       }
+
+       __lmv_object_add(new);
+       __lmv_object_get(new);
+
+       spin_unlock(&obj_list_lock);
+
+       CDEBUG(D_INODE, "New obj in lmv cache: "DFID"\n", PFID(fid));
+
+       RETURN(new);
 }
 
 struct lmv_object *lmv_object_create(struct obd_export *exp, 
@@ -394,21 +393,21 @@ cleanup:
 
 int lmv_object_delete(struct obd_export *exp, const struct lu_fid *fid)
 {
-        struct obd_device       *obd = exp->exp_obd;
-        struct lmv_object       *obj;
-        int                      rc = 0;
-        ENTRY;
-
-        cfs_spin_lock(&obj_list_lock);
-        obj = __lmv_object_find(obd, fid);
-        if (obj) {
-                obj->lo_state |= O_FREEING;
-                __lmv_object_put(obj);
-                __lmv_object_put(obj);
-                rc = 1;
-        }
-        cfs_spin_unlock(&obj_list_lock);
-        RETURN(rc);
+       struct obd_device       *obd = exp->exp_obd;
+       struct lmv_object       *obj;
+       int                      rc = 0;
+       ENTRY;
+
+       spin_lock(&obj_list_lock);
+       obj = __lmv_object_find(obd, fid);
+       if (obj) {
+               obj->lo_state |= O_FREEING;
+               __lmv_object_put(obj);
+               __lmv_object_put(obj);
+               rc = 1;
+       }
+       spin_unlock(&obj_list_lock);
+       RETURN(rc);
 }
 
 int lmv_object_setup(struct obd_device *obd)
@@ -432,7 +431,7 @@ void lmv_object_cleanup(struct obd_device *obd)
         CDEBUG(D_INFO, "LMV object manager cleanup (%s)\n",
                obd->obd_uuid.uuid);
 
-        cfs_spin_lock(&obj_list_lock);
+       spin_lock(&obj_list_lock);
         cfs_list_for_each_safe(cur, tmp, &obj_list) {
                 obj = cfs_list_entry(cur, struct lmv_object, lo_list);
 
@@ -447,6 +446,6 @@ void lmv_object_cleanup(struct obd_device *obd)
                 }
                 __lmv_object_put(obj);
         }
-        cfs_spin_unlock(&obj_list_lock);
-        EXIT;
+       spin_unlock(&obj_list_lock);
+       EXIT;
 }
index a033d07..f7041e6 100644 (file)
@@ -119,9 +119,9 @@ static int lmv_wr_placement(struct file *file, const char *buffer,
 
         policy = placement_name2policy(dummy, len);
         if (policy != PLACEMENT_INVAL_POLICY) {
-                cfs_spin_lock(&lmv->lmv_lock);
-                lmv->lmv_placement = policy;
-                cfs_spin_unlock(&lmv->lmv_lock);
+               spin_lock(&lmv->lmv_lock);
+               lmv->lmv_placement = policy;
+               spin_unlock(&lmv->lmv_lock);
         } else {
                 CERROR("Invalid placement policy \"%s\"!\n", dummy);
                 return -EINVAL;
index e0bf92b..7340df9 100644 (file)
@@ -453,9 +453,9 @@ static int lod_init0(const struct lu_env *env, struct lod_device *lod,
                }
        }
 
-       cfs_mutex_init(&lod->lod_mutex);
-       cfs_init_rwsem(&lod->lod_rw_sem);
-       cfs_spin_lock_init(&lod->lod_desc_lock);
+       mutex_init(&lod->lod_mutex);
+       init_rwsem(&lod->lod_rw_sem);
+       spin_lock_init(&lod->lod_desc_lock);
 
        RETURN(0);
 
@@ -536,11 +536,11 @@ static int lod_obd_connect(const struct lu_env *env, struct obd_export **exp,
 
        *exp = class_conn2export(&conn);
 
-       cfs_mutex_lock(&lod->lod_mutex);
+       mutex_lock(&lod->lod_mutex);
        lod->lod_connects++;
        /* at the moment we expect only one user */
        LASSERT(lod->lod_connects == 1);
-       cfs_mutex_unlock(&lod->lod_mutex);
+       mutex_unlock(&lod->lod_mutex);
 
        RETURN(0);
 }
@@ -557,16 +557,16 @@ static int lod_obd_disconnect(struct obd_export *exp)
        ENTRY;
 
        /* Only disconnect the underlying layers on the final disconnect. */
-       cfs_mutex_lock(&lod->lod_mutex);
+       mutex_lock(&lod->lod_mutex);
        lod->lod_connects--;
        if (lod->lod_connects != 0) {
                /* why should there be more than 1 connect? */
-               cfs_mutex_unlock(&lod->lod_mutex);
+               mutex_unlock(&lod->lod_mutex);
                CERROR("%s: disconnect #%d\n", exp->exp_obd->obd_name,
                       lod->lod_connects);
                goto out;
        }
-       cfs_mutex_unlock(&lod->lod_mutex);
+       mutex_unlock(&lod->lod_mutex);
 
        /* the last user of lod has gone, let's release the device */
        release = 1;
index 6ff1c76..1a76ef5 100644 (file)
@@ -81,7 +81,7 @@ struct lod_device {
        struct lov_desc       lod_desc;
 
        /* use to protect ld_active_tgt_count and all ltd_active */
-       cfs_spinlock_t        lod_desc_lock;
+       spinlock_t           lod_desc_lock;
 
        /* list of known OSTs */
        struct lod_ost_desc_idx *lod_ost_idx[OST_PTRS];
@@ -101,9 +101,9 @@ struct lod_device {
        /* Table refcount used for delayed deletion */
        int                   lod_refcount;
        /* mutex to serialize concurrent updates to the ost table */
-       cfs_mutex_t           lod_mutex;
+       struct mutex          lod_mutex;
        /* read/write semaphore used for array relocation */
-       cfs_rw_semaphore_t    lod_rw_sem;
+       struct rw_semaphore     lod_rw_sem;
 
        /* QoS info per LOD */
        struct lov_qos        lod_qos; /* qos info per lod */
index 0338879..8a1ca48 100644 (file)
  */
 void lod_getref(struct lod_device *lod)
 {
-       cfs_down_read(&lod->lod_rw_sem);
-       cfs_mutex_lock(&lod->lod_mutex);
+       down_read(&lod->lod_rw_sem);
+       mutex_lock(&lod->lod_mutex);
        lod->lod_refcount++;
-       cfs_mutex_unlock(&lod->lod_mutex);
+       mutex_unlock(&lod->lod_mutex);
 }
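
lod_getref()/lod_putref() above combine two primitives: the rw_semaphore is taken for read so many reference holders can coexist while array relocation (which takes it for write) is excluded, and a short mutex section protects the counter itself. A sketch of the combination (invented names):

#include <linux/mutex.h>
#include <linux/rwsem.h>

struct ref_table {
        struct rw_semaphore rt_rw_sem;  /* excludes array relocation */
        struct mutex        rt_mutex;   /* protects rt_refcount */
        int                 rt_refcount;
};

static void table_getref(struct ref_table *t)
{
        /* Readers share the semaphore; a resize path takes it for
         * write, so no holder can see the array move underneath it. */
        down_read(&t->rt_rw_sem);
        mutex_lock(&t->rt_mutex);
        t->rt_refcount++;
        mutex_unlock(&t->rt_mutex);
}

static void table_putref(struct ref_table *t)
{
        mutex_lock(&t->rt_mutex);
        t->rt_refcount--;
        mutex_unlock(&t->rt_mutex);
        up_read(&t->rt_rw_sem);
}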
 
 /*
@@ -63,7 +63,7 @@ void lod_getref(struct lod_device *lod)
  */
 void lod_putref(struct lod_device *lod)
 {
-       cfs_mutex_lock(&lod->lod_mutex);
+       mutex_lock(&lod->lod_mutex);
        lod->lod_refcount--;
        if (lod->lod_refcount == 0 && lod->lod_death_row) {
                struct lod_ost_desc *ost_desc, *tmp;
@@ -90,8 +90,8 @@ void lod_putref(struct lod_device *lod)
                                lod->lod_desc.ld_active_tgt_count--;
                        lod->lod_death_row--;
                }
-               cfs_mutex_unlock(&lod->lod_mutex);
-               cfs_up_read(&lod->lod_rw_sem);
+               mutex_unlock(&lod->lod_mutex);
+               up_read(&lod->lod_rw_sem);
 
                cfs_list_for_each_entry_safe(ost_desc, tmp, &kill, ltd_kill) {
                        int rc;
@@ -110,8 +110,8 @@ void lod_putref(struct lod_device *lod)
                        OBD_FREE_PTR(ost_desc);
                }
        } else {
-               cfs_mutex_unlock(&lod->lod_mutex);
-               cfs_up_read(&lod->lod_rw_sem);
+               mutex_unlock(&lod->lod_mutex);
+               up_read(&lod->lod_rw_sem);
        }
 }
 
@@ -123,7 +123,7 @@ static int lod_bitmap_resize(struct lod_device *lod, __u32 newsize)
 
        /* grab write reference on the lod. Relocating the array requires
         * exclusive access */
-       cfs_down_write(&lod->lod_rw_sem);
+       down_write(&lod->lod_rw_sem);
 
        if (newsize <= lod->lod_osts_size)
                /* someone else has already resized the array */
@@ -151,7 +151,7 @@ static int lod_bitmap_resize(struct lod_device *lod, __u32 newsize)
 
        EXIT;
 out:
-       cfs_up_write(&lod->lod_rw_sem);
+       up_write(&lod->lod_rw_sem);
        return rc;
 }
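
lod_bitmap_resize() above shows the write side of the same semaphore, including the re-check after acquisition: another thread may have grown the array while this one waited for exclusive access. A generic sketch of grow-with-recheck (invented names; plain kzalloc/kfree stand in for the real bitmap allocation):

#include <linux/errno.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct grow_array {
        struct rw_semaphore ga_sem;
        __u32               ga_size;
        int                *ga_data;
};

static int grow_array_resize(struct grow_array *ga, __u32 newsize)
{
        int *buf;
        int rc = 0;

        down_write(&ga->ga_sem);
        if (newsize <= ga->ga_size)
                goto out;       /* someone else already resized it */

        buf = kzalloc(newsize * sizeof(*buf), GFP_KERNEL);
        if (buf == NULL) {
                rc = -ENOMEM;
                goto out;
        }
        memcpy(buf, ga->ga_data, ga->ga_size * sizeof(*buf));
        kfree(ga->ga_data);
        ga->ga_data = buf;
        ga->ga_size = newsize;
out:
        up_write(&ga->ga_sem);
        return rc;
}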
 
@@ -249,7 +249,7 @@ int lod_add_device(const struct lu_env *env, struct lod_device *lod,
                lod_getref(lod);
        }
 
-       cfs_mutex_lock(&lod->lod_mutex);
+       mutex_lock(&lod->lod_mutex);
        if (cfs_bitmap_check(lod->lod_ost_bitmap, index)) {
                CERROR("%s: device %d is registered already\n", obd->obd_name,
                       index);
@@ -287,7 +287,7 @@ int lod_add_device(const struct lu_env *env, struct lod_device *lod,
        OST_TGT(lod, index) = ost_desc;
        cfs_bitmap_set(lod->lod_ost_bitmap, index);
        lod->lod_ostnr++;
-       cfs_mutex_unlock(&lod->lod_mutex);
+       mutex_unlock(&lod->lod_mutex);
        lod_putref(lod);
 
        if (lod->lod_recovery_completed)
@@ -298,7 +298,7 @@ int lod_add_device(const struct lu_env *env, struct lod_device *lod,
 out_pool:
        lod_ost_pool_remove(&lod->lod_pool_info, index);
 out_mutex:
-       cfs_mutex_unlock(&lod->lod_mutex);
+       mutex_unlock(&lod->lod_mutex);
        lod_putref(lod);
 out_desc:
        OBD_FREE_PTR(ost_desc);
@@ -358,7 +358,7 @@ int lod_del_device(const struct lu_env *env, struct lod_device *lod,
        obd_str2uuid(&uuid,  osp);
 
        lod_getref(lod);
-       cfs_mutex_lock(&lod->lod_mutex);
+       mutex_lock(&lod->lod_mutex);
        /* check that the index is allocated in the bitmap */
        if (!cfs_bitmap_check(lod->lod_ost_bitmap, idx) || !OST_TGT(lod,idx)) {
                CERROR("%s: device %d is not set up\n", obd->obd_name, idx);
@@ -376,7 +376,7 @@ int lod_del_device(const struct lu_env *env, struct lod_device *lod,
        __lod_del_device(lod, idx);
        EXIT;
 out:
-       cfs_mutex_unlock(&lod->lod_mutex);
+       mutex_unlock(&lod->lod_mutex);
        lod_putref(lod);
        return(rc);
 }
@@ -890,7 +890,7 @@ int lod_pools_init(struct lod_device *lod, struct lustre_cfg *lcfg)
 
        /* Set up allocation policy (QoS and RR) */
        CFS_INIT_LIST_HEAD(&lod->lod_qos.lq_oss_list);
-       cfs_init_rwsem(&lod->lod_qos.lq_rw_sem);
+       init_rwsem(&lod->lod_qos.lq_rw_sem);
        lod->lod_qos.lq_dirty = 1;
        lod->lod_qos.lq_rr.lqr_dirty = 1;
        lod->lod_qos.lq_reset = 1;
@@ -987,10 +987,10 @@ int lod_pools_fini(struct lod_device *lod)
        if (lod->lod_osts_size > 0) {
                int idx;
                lod_getref(lod);
-               cfs_mutex_lock(&lod->lod_mutex);
+               mutex_lock(&lod->lod_mutex);
                cfs_foreach_bit(lod->lod_ost_bitmap, idx)
                        __lod_del_device(lod, idx);
-               cfs_mutex_unlock(&lod->lod_mutex);
+               mutex_unlock(&lod->lod_mutex);
                lod_putref(lod);
                CFS_FREE_BITMAP(lod->lod_ost_bitmap);
                for (idx = 0; idx < OST_PTRS; idx++) {
index c9d4901..df05ba9 100644 (file)
@@ -181,14 +181,14 @@ static void *pool_proc_next(struct seq_file *s, void *v, loff_t *pos)
 
        /* iterate to find a non-empty entry */
        prev_idx = iter->idx;
-       cfs_down_read(&pool_tgt_rw_sem(iter->pool));
+       down_read(&pool_tgt_rw_sem(iter->pool));
        iter->idx++;
        if (iter->idx == pool_tgt_count(iter->pool)) {
                iter->idx = prev_idx; /* we stay on the last entry */
-               cfs_up_read(&pool_tgt_rw_sem(iter->pool));
+               up_read(&pool_tgt_rw_sem(iter->pool));
                return NULL;
        }
-       cfs_up_read(&pool_tgt_rw_sem(iter->pool));
+       up_read(&pool_tgt_rw_sem(iter->pool));
        (*pos)++;
        /* return != NULL to continue */
        return iter;
@@ -258,9 +258,9 @@ static int pool_proc_show(struct seq_file *s, void *v)
        LASSERT(iter->pool != NULL);
        LASSERT(iter->idx <= pool_tgt_count(iter->pool));
 
-       cfs_down_read(&pool_tgt_rw_sem(iter->pool));
+       down_read(&pool_tgt_rw_sem(iter->pool));
        osc_desc = pool_tgt(iter->pool, iter->idx);
-       cfs_up_read(&pool_tgt_rw_sem(iter->pool));
+       up_read(&pool_tgt_rw_sem(iter->pool));
        if (osc_desc)
                seq_printf(s, "%s\n", obd_uuid2str(&(osc_desc->ltd_uuid)));
 
@@ -302,7 +302,7 @@ void lod_dump_pool(int level, struct pool_desc *pool)
 
        CDEBUG(level, "pool "LOV_POOLNAMEF" has %d members\n",
               pool->pool_name, pool->pool_obds.op_count);
-       cfs_down_read(&pool_tgt_rw_sem(pool));
+       down_read(&pool_tgt_rw_sem(pool));
 
        for (i = 0; i < pool_tgt_count(pool) ; i++) {
                if (!pool_tgt(pool, i) || !(pool_tgt(pool, i))->ltd_exp)
@@ -312,7 +312,7 @@ void lod_dump_pool(int level, struct pool_desc *pool)
                       obd_uuid2str(&((pool_tgt(pool, i))->ltd_uuid)));
        }
 
-       cfs_up_read(&pool_tgt_rw_sem(pool));
+       up_read(&pool_tgt_rw_sem(pool));
        lod_pool_putref(pool);
 }
 
@@ -325,7 +325,7 @@ int lod_ost_pool_init(struct ost_pool *op, unsigned int count)
                count = LOD_POOL_INIT_COUNT;
        op->op_array = NULL;
        op->op_count = 0;
-       cfs_init_rwsem(&op->op_rw_sem);
+       init_rwsem(&op->op_rw_sem);
        op->op_size = count;
        OBD_ALLOC(op->op_array, op->op_size * sizeof(op->op_array[0]));
        if (op->op_array == NULL) {
@@ -365,7 +365,7 @@ int lod_ost_pool_add(struct ost_pool *op, __u32 idx, unsigned int min_count)
        int rc = 0, i;
        ENTRY;
 
-       cfs_down_write(&op->op_rw_sem);
+       down_write(&op->op_rw_sem);
 
        rc = lod_ost_pool_extend(op, min_count);
        if (rc)
@@ -381,7 +381,7 @@ int lod_ost_pool_add(struct ost_pool *op, __u32 idx, unsigned int min_count)
        op->op_count++;
        EXIT;
 out:
-       cfs_up_write(&op->op_rw_sem);
+       up_write(&op->op_rw_sem);
        return rc;
 }
 
@@ -390,20 +390,20 @@ int lod_ost_pool_remove(struct ost_pool *op, __u32 idx)
        int i;
        ENTRY;
 
-       cfs_down_write(&op->op_rw_sem);
+       down_write(&op->op_rw_sem);
 
        for (i = 0; i < op->op_count; i++) {
                if (op->op_array[i] == idx) {
                        memmove(&op->op_array[i], &op->op_array[i + 1],
                                (op->op_count - i - 1) * sizeof(op->op_array[0]));
                        op->op_count--;
-                       cfs_up_write(&op->op_rw_sem);
+                       up_write(&op->op_rw_sem);
                        EXIT;
                        return 0;
                }
        }
 
-       cfs_up_write(&op->op_rw_sem);
+       up_write(&op->op_rw_sem);
        RETURN(-EINVAL);
 }
 
@@ -414,14 +414,14 @@ int lod_ost_pool_free(struct ost_pool *op)
        if (op->op_size == 0)
                RETURN(0);
 
-       cfs_down_write(&op->op_rw_sem);
+       down_write(&op->op_rw_sem);
 
        OBD_FREE(op->op_array, op->op_size * sizeof(op->op_array[0]));
        op->op_array = NULL;
        op->op_count = 0;
        op->op_size = 0;
 
-       cfs_up_write(&op->op_rw_sem);
+       up_write(&op->op_rw_sem);
        RETURN(0);
 }
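
The ost_pool hunks above fall neatly into a reader/writer split: scanning paths such as pool_proc_show() and lod_dump_pool() take op_rw_sem for read, while lod_ost_pool_add/remove/free take it for write. A compact sketch of that split over a small fixed array (invented names):

#include <linux/errno.h>
#include <linux/rwsem.h>
#include <linux/types.h>

#define POOL_MAX 16

struct idx_pool {
        struct rw_semaphore ip_sem;
        unsigned int        ip_count;
        __u32               ip_array[POOL_MAX];
};

static int idx_pool_contains(struct idx_pool *p, __u32 idx)
{
        unsigned int i;
        int found = 0;

        down_read(&p->ip_sem);          /* many readers may scan at once */
        for (i = 0; i < p->ip_count; i++) {
                if (p->ip_array[i] == idx) {
                        found = 1;
                        break;
                }
        }
        up_read(&p->ip_sem);
        return found;
}

static int idx_pool_add(struct idx_pool *p, __u32 idx)
{
        int rc = 0;

        down_write(&p->ip_sem);         /* writers are exclusive */
        if (p->ip_count >= POOL_MAX)
                rc = -ENOSPC;
        else
                p->ip_array[p->ip_count++] = idx;
        up_write(&p->ip_sem);
        return rc;
}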
 
@@ -471,10 +471,10 @@ int lod_pool_new(struct obd_device *obd, char *poolname)
        CDEBUG(D_INFO, "pool %p - proc %p\n", new_pool, new_pool->pool_proc_entry);
 #endif
 
-       cfs_spin_lock(&obd->obd_dev_lock);
+       spin_lock(&obd->obd_dev_lock);
        cfs_list_add_tail(&new_pool->pool_list, &lod->lod_pool_list);
        lod->lod_pool_count++;
-       cfs_spin_unlock(&obd->obd_dev_lock);
+       spin_unlock(&obd->obd_dev_lock);
 
        /* add to find only when it is fully ready */
        rc = cfs_hash_add_unique(lod->lod_pools_hash_body, poolname,
@@ -488,10 +488,10 @@ int lod_pool_new(struct obd_device *obd, char *poolname)
        RETURN(0);
 
 out_err:
-       cfs_spin_lock(&obd->obd_dev_lock);
+       spin_lock(&obd->obd_dev_lock);
        cfs_list_del_init(&new_pool->pool_list);
        lod->lod_pool_count--;
-       cfs_spin_unlock(&obd->obd_dev_lock);
+       spin_unlock(&obd->obd_dev_lock);
 
        lprocfs_remove(&new_pool->pool_proc_entry);
 
@@ -519,10 +519,10 @@ int lod_pool_del(struct obd_device *obd, char *poolname)
                lod_pool_putref(pool);
        }
 
-       cfs_spin_lock(&obd->obd_dev_lock);
+       spin_lock(&obd->obd_dev_lock);
        cfs_list_del_init(&pool->pool_list);
        lod->lod_pool_count--;
-       cfs_spin_unlock(&obd->obd_dev_lock);
+       spin_unlock(&obd->obd_dev_lock);
 
        /* release last reference */
        lod_pool_putref(pool);
@@ -627,7 +627,7 @@ int lod_check_index_in_pool(__u32 idx, struct pool_desc *pool)
         */
        lod_pool_getref(pool);
 
-       cfs_down_read(&pool_tgt_rw_sem(pool));
+       down_read(&pool_tgt_rw_sem(pool));
 
        for (i = 0; i < pool_tgt_count(pool); i++) {
                if (pool_tgt_array(pool)[i] == idx)
@@ -636,7 +636,7 @@ int lod_check_index_in_pool(__u32 idx, struct pool_desc *pool)
        rc = -ENOENT;
        EXIT;
 out:
-       cfs_up_read(&pool_tgt_rw_sem(pool));
+       up_read(&pool_tgt_rw_sem(pool));
 
        lod_pool_putref(pool);
        return rc;
index e5b4565..e59b904 100644 (file)
@@ -67,7 +67,7 @@ int qos_add_tgt(struct lod_device *lod, struct lod_ost_desc *ost_desc)
        cfs_list_t         *list;
        ENTRY;
 
-       cfs_down_write(&lod->lod_qos.lq_rw_sem);
+       down_write(&lod->lod_qos.lq_rw_sem);
        /*
         * a bit hacky approach to learn NID of corresponding connection
         * but there is no official API to access information like this
@@ -114,7 +114,7 @@ int qos_add_tgt(struct lod_device *lod, struct lod_ost_desc *ost_desc)
        lod->lod_qos.lq_rr.lqr_dirty = 1;
 
 out:
-       cfs_up_write(&lod->lod_qos.lq_rw_sem);
+       up_write(&lod->lod_qos.lq_rw_sem);
        RETURN(rc);
 }
 
@@ -124,7 +124,7 @@ int qos_del_tgt(struct lod_device *lod, struct lod_ost_desc *ost_desc)
        int                 rc = 0;
        ENTRY;
 
-       cfs_down_write(&lod->lod_qos.lq_rw_sem);
+       down_write(&lod->lod_qos.lq_rw_sem);
        oss = ost_desc->ltd_qos.ltq_oss;
        if (!oss)
                GOTO(out, rc = -ENOENT);
@@ -141,7 +141,7 @@ int qos_del_tgt(struct lod_device *lod, struct lod_ost_desc *ost_desc)
        lod->lod_qos.lq_dirty = 1;
        lod->lod_qos.lq_rr.lqr_dirty = 1;
 out:
-       cfs_up_write(&lod->lod_qos.lq_rw_sem);
+       up_write(&lod->lod_qos.lq_rw_sem);
        RETURN(rc);
 }
 
@@ -162,7 +162,7 @@ static int lod_statfs_and_check(const struct lu_env *env, struct lod_device *d,
        /* check whether device has changed state (active, inactive) */
        if (rc != 0 && ost->ltd_active) {
                /* turned inactive? */
-               cfs_spin_lock(&d->lod_desc_lock);
+               spin_lock(&d->lod_desc_lock);
                if (ost->ltd_active) {
                        ost->ltd_active = 0;
                        LASSERT(d->lod_desc.ld_active_tgt_count > 0);
@@ -172,11 +172,11 @@ static int lod_statfs_and_check(const struct lu_env *env, struct lod_device *d,
                        CDEBUG(D_CONFIG, "%s: turns inactive\n",
                               ost->ltd_exp->exp_obd->obd_name);
                }
-               cfs_spin_unlock(&d->lod_desc_lock);
+               spin_unlock(&d->lod_desc_lock);
        } else if (rc == 0 && ost->ltd_active == 0) {
                /* turned active? */
                LASSERT(d->lod_desc.ld_active_tgt_count < d->lod_ostnr);
-               cfs_spin_lock(&d->lod_desc_lock);
+               spin_lock(&d->lod_desc_lock);
                if (ost->ltd_active == 0) {
                        ost->ltd_active = 1;
                        d->lod_desc.ld_active_tgt_count++;
@@ -185,7 +185,7 @@ static int lod_statfs_and_check(const struct lu_env *env, struct lod_device *d,
                        CDEBUG(D_CONFIG, "%s: turns active\n",
                               ost->ltd_exp->exp_obd->obd_name);
                }
-               cfs_spin_unlock(&d->lod_desc_lock);
+               spin_unlock(&d->lod_desc_lock);
        }
 
        return rc;
@@ -206,7 +206,7 @@ static void lod_qos_statfs_update(const struct lu_env *env,
                /* statfs data are quite recent, don't need to refresh it */
                RETURN_EXIT;
 
-       cfs_down_write(&lod->lod_qos.lq_rw_sem);
+       down_write(&lod->lod_qos.lq_rw_sem);
        if (cfs_time_beforeq_64(max_age, obd->obd_osfs_age))
                GOTO(out, rc = 0);
 
@@ -224,7 +224,7 @@ static void lod_qos_statfs_update(const struct lu_env *env,
        obd->obd_osfs_age = cfs_time_current_64();
 
 out:
-       cfs_up_write(&lod->lod_qos.lq_rw_sem);
+       up_write(&lod->lod_qos.lq_rw_sem);
 }
 
 /* Recalculate per-object penalties for OSSs and OSTs,
@@ -454,7 +454,7 @@ static int lod_qos_calc_rr(struct lod_device *lod, struct ost_pool *src_pool,
        }
 
        /* Do actual allocation. */
-       cfs_down_write(&lod->lod_qos.lq_rw_sem);
+       down_write(&lod->lod_qos.lq_rw_sem);
 
        /*
         * Check again. While we were sleeping on @lq_rw_sem something could
@@ -462,7 +462,7 @@ static int lod_qos_calc_rr(struct lod_device *lod, struct ost_pool *src_pool,
         */
        if (!lqr->lqr_dirty) {
                LASSERT(lqr->lqr_pool.op_size);
-               cfs_up_write(&lod->lod_qos.lq_rw_sem);
+               up_write(&lod->lod_qos.lq_rw_sem);
                RETURN(0);
        }
 
@@ -475,7 +475,7 @@ static int lod_qos_calc_rr(struct lod_device *lod, struct ost_pool *src_pool,
        lqr->lqr_pool.op_count = real_count;
        rc = lod_ost_pool_extend(&lqr->lqr_pool, real_count);
        if (rc) {
-               cfs_up_write(&lod->lod_qos.lq_rw_sem);
+               up_write(&lod->lod_qos.lq_rw_sem);
                RETURN(rc);
        }
        for (i = 0; i < lqr->lqr_pool.op_count; i++)
@@ -510,7 +510,7 @@ static int lod_qos_calc_rr(struct lod_device *lod, struct ost_pool *src_pool,
        }
 
        lqr->lqr_dirty = 0;
-       cfs_up_write(&lod->lod_qos.lq_rw_sem);
+       up_write(&lod->lod_qos.lq_rw_sem);
 
        if (placed != real_count) {
                /* This should never happen */
@@ -613,7 +613,7 @@ static int inline lod_qos_dev_is_full(struct obd_statfs *msfs)
 
        /* the minimum of 0.1% of used blocks and 1GB, expressed in blocks. */
        used = min_t(__u64, (msfs->os_blocks - msfs->os_bfree) >> 10,
-                       1 << (31 - cfs_ffs(bs)));
+                       1 << (31 - ffs(bs)));
        return (msfs->os_bavail < used);
 }
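
The threshold above deserves a worked example. ffs() is the kernel's one-based find-first-set, so for a power-of-two block size bs the expression 1 << (31 - ffs(bs)) is 2^31 bytes (1GB) converted into blocks, while (os_blocks - os_bfree) >> 10 is roughly 0.1% of the used blocks. A sketch, assuming a 4096-byte block size:

#include <linux/bitops.h>

static unsigned int example_one_gb_in_blocks(int bs)
{
        /* ffs(4096) == 13, so this is 1 << 18 == 262144 blocks;
         * 262144 blocks * 4096 bytes/block == 2^30 bytes == 1GB */
        return 1 << (31 - ffs(bs));
}
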
 
@@ -679,7 +679,7 @@ static int lod_alloc_rr(const struct lu_env *env, struct lod_object *lo,
                pool = lod_find_pool(m, lo->ldo_pool);
 
        if (pool != NULL) {
-               cfs_down_read(&pool_tgt_rw_sem(pool));
+               down_read(&pool_tgt_rw_sem(pool));
                osts = &(pool->pool_obds);
                lqr = &(pool->pool_rr);
        } else {
@@ -709,7 +709,7 @@ static int lod_alloc_rr(const struct lu_env *env, struct lod_object *lo,
                if (stripe_cnt > 1 && (osts->op_count % stripe_cnt) != 1)
                        ++lqr->lqr_offset_idx;
        }
-       cfs_down_read(&m->lod_qos.lq_rw_sem);
+       down_read(&m->lod_qos.lq_rw_sem);
        ost_start_idx_temp = lqr->lqr_start_idx;
 
 repeat_find:
@@ -801,7 +801,7 @@ repeat_find:
                goto repeat_find;
        }
 
-       cfs_up_read(&m->lod_qos.lq_rw_sem);
+       up_read(&m->lod_qos.lq_rw_sem);
 
        if (stripe_idx) {
                lo->ldo_stripenr = stripe_idx;
@@ -814,7 +814,7 @@ repeat_find:
 
 out:
        if (pool != NULL) {
-               cfs_up_read(&pool_tgt_rw_sem(pool));
+               up_read(&pool_tgt_rw_sem(pool));
                /* put back ref got by lod_find_pool() */
                lod_pool_putref(pool);
        }
@@ -844,7 +844,7 @@ static int lod_alloc_specific(const struct lu_env *env, struct lod_object *lo,
                pool = lod_find_pool(m, lo->ldo_pool);
 
        if (pool != NULL) {
-               cfs_down_read(&pool_tgt_rw_sem(pool));
+               down_read(&pool_tgt_rw_sem(pool));
                osts = &(pool->pool_obds);
        } else {
                osts = &(m->lod_pool_info);
@@ -941,7 +941,7 @@ repeat_find:
        rc = -EFBIG;
 out:
        if (pool != NULL) {
-               cfs_up_read(&pool_tgt_rw_sem(pool));
+               up_read(&pool_tgt_rw_sem(pool));
                /* put back ref got by lod_find_pool() */
                lod_pool_putref(pool);
        }
@@ -993,7 +993,7 @@ static int lod_alloc_qos(const struct lu_env *env, struct lod_object *lo,
                pool = lod_find_pool(m, lo->ldo_pool);
 
        if (pool != NULL) {
-               cfs_down_read(&pool_tgt_rw_sem(pool));
+               down_read(&pool_tgt_rw_sem(pool));
                osts = &(pool->pool_obds);
        } else {
                osts = &(m->lod_pool_info);
@@ -1004,7 +1004,7 @@ static int lod_alloc_qos(const struct lu_env *env, struct lod_object *lo,
                GOTO(out_nolock, rc = -EAGAIN);
 
        /* Do actual allocation, use write lock here. */
-       cfs_down_write(&m->lod_qos.lq_rw_sem);
+       down_write(&m->lod_qos.lq_rw_sem);
 
        /*
         * Check again, while we were sleeping on @lq_rw_sem things could
@@ -1167,11 +1167,11 @@ static int lod_alloc_qos(const struct lu_env *env, struct lod_object *lo,
        }
 
 out:
-       cfs_up_write(&m->lod_qos.lq_rw_sem);
+       up_write(&m->lod_qos.lq_rw_sem);
 
 out_nolock:
        if (pool != NULL) {
-               cfs_up_read(&pool_tgt_rw_sem(pool));
+               up_read(&pool_tgt_rw_sem(pool));
                /* put back ref got by lod_find_pool() */
                lod_pool_putref(pool);
        }
index 819dab4..881e914 100644
@@ -340,7 +340,7 @@ static void *lod_osts_seq_start(struct seq_file *p, loff_t *pos)
        if (*pos >= lod->lod_ost_bitmap->size)
                return NULL;
 
-       *pos = cfs_find_next_bit(lod->lod_ost_bitmap->data,
+       *pos = find_next_bit(lod->lod_ost_bitmap->data,
                                 lod->lod_ost_bitmap->size, *pos);
        if (*pos < lod->lod_ost_bitmap->size)
                return OST_TGT(lod,*pos);
@@ -366,7 +366,7 @@ static void *lod_osts_seq_next(struct seq_file *p, void *v, loff_t *pos)
        if (*pos >= lod->lod_ost_bitmap->size - 1)
                return NULL;
 
-       *pos = cfs_find_next_bit(lod->lod_ost_bitmap->data,
+       *pos = find_next_bit(lod->lod_ost_bitmap->data,
                                 lod->lod_ost_bitmap->size, *pos + 1);
        if (*pos < lod->lod_ost_bitmap->size)
                return OST_TGT(lod,*pos);
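
Both seq_file iterators above rely on the kernel's bitmap search. A self-contained sketch of the same find_next_bit() walk, over a hypothetical bitmap rather than lod_ost_bitmap:

#include <linux/bitops.h>
#include <linux/printk.h>
#include <linux/types.h>

#define EXAMPLE_BITS 128

static DECLARE_BITMAP(example_map, EXAMPLE_BITS);

static void example_walk(void)
{
        unsigned long i;

        set_bit(3, example_map);
        set_bit(70, example_map);
        /* find_next_bit() returns EXAMPLE_BITS when no further bit is set */
        for (i = find_next_bit(example_map, EXAMPLE_BITS, 0);
             i < EXAMPLE_BITS;
             i = find_next_bit(example_map, EXAMPLE_BITS, i + 1))
                pr_info("bit %lu is set\n", i);
}
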
index b74df27..ed991e0 100644
@@ -156,7 +156,7 @@ struct lov_device {
          * Serializes access to lov_device::ld_emrg in low-memory
          * conditions.
          */
-        cfs_mutex_t               ld_mutex;
+       struct mutex              ld_mutex;
 };
 
 /**
@@ -194,7 +194,7 @@ struct lov_object {
          *
          * \see lov_object::lo_type
          */
-        cfs_rw_semaphore_t     lo_type_guard;
+       struct rw_semaphore     lo_type_guard;
         /**
          * Type of an object. Protected by lov_object::lo_type_guard.
          */
@@ -242,7 +242,7 @@ struct lov_object {
                         /**
                          * protect lo_sub
                          */
-                        cfs_spinlock_t         lo_sub_lock;
+                       spinlock_t              lo_sub_lock;
                         /**
                          * Cached object attribute, built from sub-object
                          * attributes.
index 5fe5162..ea208dc 100644
@@ -58,7 +58,7 @@ cfs_mem_cache_t *lovsub_req_kmem;
 cfs_mem_cache_t *lov_lock_link_kmem;
 
 /** Lock class of lov_device::ld_mutex. */
-cfs_lock_class_key_t cl_lov_device_mutex_class;
+struct lock_class_key cl_lov_device_mutex_class;
 
 struct lu_kmem_descr lov_caches[] = {
         {
@@ -385,7 +385,7 @@ static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev)
 
                 OBD_ALLOC(newd, tgt_size * sz);
                 if (newd != NULL) {
-                        cfs_mutex_lock(&dev->ld_mutex);
+                       mutex_lock(&dev->ld_mutex);
                         if (sub_size > 0) {
                                 memcpy(newd, dev->ld_target, sub_size * sz);
                                 OBD_FREE(dev->ld_target, sub_size * sz);
@@ -396,7 +396,7 @@ static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev)
                         if (dev->ld_emrg != NULL)
                                 lov_emerg_free(dev->ld_emrg, sub_size);
                         dev->ld_emrg = emerg;
-                        cfs_mutex_unlock(&dev->ld_mutex);
+                       mutex_unlock(&dev->ld_mutex);
                 } else {
                         lov_emerg_free(emerg, tgt_size);
                         result = -ENOMEM;
@@ -502,8 +502,8 @@ static struct lu_device *lov_device_alloc(const struct lu_env *env,
         d->ld_ops        = &lov_lu_ops;
         ld->ld_cl.cd_ops = &lov_cl_ops;
 
-        cfs_mutex_init(&ld->ld_mutex);
-        cfs_lockdep_set_class(&ld->ld_mutex, &cl_lov_device_mutex_class);
+       mutex_init(&ld->ld_mutex);
+       lockdep_set_class(&ld->ld_mutex, &cl_lov_device_mutex_class);
 
         /* setup the LOV OBD */
         obd = class_name2obd(lustre_cfg_string(cfg, 0));
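
mutex_init() plus lockdep_set_class() above gives ld_mutex its own lockdep class, so nesting it against other mutexes does not raise false-positive lock-order warnings. A minimal sketch of that pairing, with hypothetical names:

#include <linux/lockdep.h>
#include <linux/mutex.h>

static struct lock_class_key example_mutex_class;
static struct mutex example_mutex;

static void example_setup(void)
{
        mutex_init(&example_mutex);
        /* move this mutex out of the default class assigned by
         * mutex_init() so lockdep tracks it separately */
        lockdep_set_class(&example_mutex, &example_mutex_class);
}
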
index 48dfef6..3ed5b82 100644
@@ -181,16 +181,16 @@ static void lov_tgt_maxbytes(struct lov_tgt_desc *tgt, __u64 *stripe_maxbytes)
                 return;
         }
 
-        cfs_spin_lock(&imp->imp_lock);
-        if (imp->imp_state == LUSTRE_IMP_FULL &&
-           (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES) &&
-           imp->imp_connect_data.ocd_maxbytes > 0) {
-                if (*stripe_maxbytes > imp->imp_connect_data.ocd_maxbytes)
-                        *stripe_maxbytes = imp->imp_connect_data.ocd_maxbytes;
-        } else {
-                *stripe_maxbytes = LUSTRE_STRIPE_MAXBYTES;
-        }
-        cfs_spin_unlock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
+       if (imp->imp_state == LUSTRE_IMP_FULL &&
+           (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES) &&
+           imp->imp_connect_data.ocd_maxbytes > 0) {
+               if (*stripe_maxbytes > imp->imp_connect_data.ocd_maxbytes)
+                       *stripe_maxbytes = imp->imp_connect_data.ocd_maxbytes;
+       } else {
+               *stripe_maxbytes = LUSTRE_STRIPE_MAXBYTES;
+       }
+       spin_unlock(&imp->imp_lock);
 }
 
 static int lsm_lmm_verify_v1(struct lov_mds_md_v1 *lmm, int lmm_bytes,
index c2f1e15..b0586b5 100644
@@ -84,7 +84,7 @@ struct lov_request_set {
        struct lov_lock_handles         *set_lockh;
        cfs_list_t                      set_list;
        cfs_waitq_t                     set_waitq;
-       cfs_spinlock_t                  set_lock;
+       spinlock_t                      set_lock;
 };
 
 extern cfs_mem_cache_t *lov_oinfo_slab;
index e8a5c54..1543afd 100644
@@ -156,7 +156,7 @@ static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio,
         sub->sub_borrowed = 0;
 
         if (lio->lis_mem_frozen) {
-                LASSERT(cfs_mutex_is_locked(&ld->ld_mutex));
+               LASSERT(mutex_is_locked(&ld->ld_mutex));
                 sub->sub_io  = &ld->ld_emrg[stripe]->emrg_subio;
                 sub->sub_env = ld->ld_emrg[stripe]->emrg_env;
                 sub->sub_borrowed = 1;
@@ -628,7 +628,7 @@ static int lov_io_submit(const struct lu_env *env,
                  * In order to not make things worse, even don't try to
                  * allocate the memory with __GFP_NOWARN. -jay
                  */
-                cfs_mutex_lock(&ld->ld_mutex);
+               mutex_lock(&ld->ld_mutex);
                 lio->lis_mem_frozen = 1;
         }
 
@@ -681,7 +681,7 @@ static int lov_io_submit(const struct lu_env *env,
                                 lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
                 }
                 lio->lis_mem_frozen = 0;
-                cfs_mutex_unlock(&ld->ld_mutex);
+               mutex_unlock(&ld->ld_mutex);
         }
 
         RETURN(rc);
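
The lis_mem_frozen branch above asserts lock ownership with mutex_is_locked(). Note that mutex_is_locked() only reports that some task holds the mutex, not that the current task does, so it is a debugging aid rather than an ownership proof. A minimal sketch:

#include <linux/bug.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);

static void example_op_needing_lock(void)
{
        /* cheap sanity check; was cfs_mutex_is_locked() */
        WARN_ON(!mutex_is_locked(&example_mutex));
        /* ... work that requires example_mutex held ... */
}
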
index 4e0df27..5aef196 100644
@@ -74,9 +74,9 @@ static void lov_getref(struct obd_device *obd)
         struct lov_obd *lov = &obd->u.lov;
 
         /* nobody gets through here until lov_putref is done */
-        cfs_mutex_lock(&lov->lov_lock);
+       mutex_lock(&lov->lov_lock);
         cfs_atomic_inc(&lov->lov_refcount);
-        cfs_mutex_unlock(&lov->lov_lock);
+       mutex_unlock(&lov->lov_lock);
         return;
 }
 
@@ -86,7 +86,7 @@ static void lov_putref(struct obd_device *obd)
 {
         struct lov_obd *lov = &obd->u.lov;
 
-        cfs_mutex_lock(&lov->lov_lock);
+       mutex_lock(&lov->lov_lock);
         /* ok to dec to 0 more than once -- ltd_exp's will be null */
         if (cfs_atomic_dec_and_test(&lov->lov_refcount) && lov->lov_death_row) {
                 CFS_LIST_HEAD(kill);
@@ -107,7 +107,7 @@ static void lov_putref(struct obd_device *obd)
                         lov->lov_tgts[i] = NULL;
                         lov->lov_death_row--;
                 }
-                cfs_mutex_unlock(&lov->lov_lock);
+               mutex_unlock(&lov->lov_lock);
 
                 cfs_list_for_each_entry_safe(tgt, n, &kill, ltd_kill) {
                         cfs_list_del(&tgt->ltd_kill);
@@ -115,7 +115,7 @@ static void lov_putref(struct obd_device *obd)
                         __lov_del_obd(obd, tgt);
                 }
         } else {
-                cfs_mutex_unlock(&lov->lov_lock);
+               mutex_unlock(&lov->lov_lock);
         }
 }
 
@@ -539,13 +539,13 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
         if (tgt_obd == NULL)
                 RETURN(-EINVAL);
 
-        cfs_mutex_lock(&lov->lov_lock);
+       mutex_lock(&lov->lov_lock);
 
         if ((index < lov->lov_tgt_size) && (lov->lov_tgts[index] != NULL)) {
                 tgt = lov->lov_tgts[index];
                 CERROR("UUID %s already assigned at LOV target index %d\n",
                        obd_uuid2str(&tgt->ltd_uuid), index);
-                cfs_mutex_unlock(&lov->lov_lock);
+               mutex_unlock(&lov->lov_lock);
                 RETURN(-EEXIST);
         }
 
@@ -559,7 +559,7 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
                         newsize = newsize << 1;
                 OBD_ALLOC(newtgts, sizeof(*newtgts) * newsize);
                 if (newtgts == NULL) {
-                        cfs_mutex_unlock(&lov->lov_lock);
+                       mutex_unlock(&lov->lov_lock);
                         RETURN(-ENOMEM);
                 }
 
@@ -584,13 +584,13 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
 
         OBD_ALLOC_PTR(tgt);
         if (!tgt) {
-                cfs_mutex_unlock(&lov->lov_lock);
+               mutex_unlock(&lov->lov_lock);
                 RETURN(-ENOMEM);
         }
 
         rc = lov_ost_pool_add(&lov->lov_packed, index, lov->lov_tgt_size);
         if (rc) {
-                cfs_mutex_unlock(&lov->lov_lock);
+               mutex_unlock(&lov->lov_lock);
                 OBD_FREE_PTR(tgt);
                 RETURN(rc);
         }
@@ -605,7 +605,7 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
         if (index >= lov->desc.ld_tgt_count)
                 lov->desc.ld_tgt_count = index + 1;
 
-        cfs_mutex_unlock(&lov->lov_lock);
+       mutex_unlock(&lov->lov_lock);
 
         CDEBUG(D_CONFIG, "idx=%d ltd_gen=%d ld_tgt_count=%d\n",
                 index, tgt->ltd_gen, lov->desc.ld_tgt_count);
@@ -803,7 +803,7 @@ int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
         lov->desc = *desc;
         lov->lov_tgt_size = 0;
 
-        cfs_mutex_init(&lov->lov_lock);
+       mutex_init(&lov->lov_lock);
         cfs_atomic_set(&lov->lov_refcount, 0);
         lov->lov_sp_me = LUSTRE_SP_CLI;
 
@@ -2679,18 +2679,18 @@ static int lov_extent_calc(struct obd_export *exp, struct lov_stripe_md *lsm,
 
 void lov_stripe_lock(struct lov_stripe_md *md)
 {
-        LASSERT(md->lsm_lock_owner != cfs_curproc_pid());
-        cfs_spin_lock(&md->lsm_lock);
-        LASSERT(md->lsm_lock_owner == 0);
-        md->lsm_lock_owner = cfs_curproc_pid();
+       LASSERT(md->lsm_lock_owner != cfs_curproc_pid());
+       spin_lock(&md->lsm_lock);
+       LASSERT(md->lsm_lock_owner == 0);
+       md->lsm_lock_owner = cfs_curproc_pid();
 }
 EXPORT_SYMBOL(lov_stripe_lock);
 
 void lov_stripe_unlock(struct lov_stripe_md *md)
 {
-        LASSERT(md->lsm_lock_owner == cfs_curproc_pid());
-        md->lsm_lock_owner = 0;
-        cfs_spin_unlock(&md->lsm_lock);
+       LASSERT(md->lsm_lock_owner == cfs_curproc_pid());
+       md->lsm_lock_owner = 0;
+       spin_unlock(&md->lsm_lock);
 }
 EXPORT_SYMBOL(lov_stripe_unlock);
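
lov_stripe_lock()/lov_stripe_unlock() pair a plain spinlock with an owner pid recorded under the lock, so recursive locking by the same task trips an assertion instead of silently deadlocking. A generic sketch of the idiom (BUG_ON stands in for Lustre's LASSERT; names are illustrative):

#include <linux/bug.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_md {
        spinlock_t      lock;
        pid_t           lock_owner;
};

static void example_md_lock(struct example_md *md, pid_t me)
{
        BUG_ON(md->lock_owner == me);   /* would self-deadlock */
        spin_lock(&md->lock);
        BUG_ON(md->lock_owner != 0);
        md->lock_owner = me;
}

static void example_md_unlock(struct example_md *md, pid_t me)
{
        BUG_ON(md->lock_owner != me);
        md->lock_owner = 0;
        spin_unlock(&md->lock);
}
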
 
index 99d543a..65e4466 100644
@@ -201,7 +201,7 @@ static int lov_init_raid0(const struct lu_env *env,
         if (r0->lo_sub != NULL) {
                 result = 0;
                 subconf->coc_inode = conf->coc_inode;
-                cfs_spin_lock_init(&r0->lo_sub_lock);
+               spin_lock_init(&r0->lo_sub_lock);
                 /*
                  * Create stripe cl_objects.
                  */
@@ -269,12 +269,12 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
                         /* this wait-queue is signaled at the end of
                          * lu_object_free(). */
                         cfs_set_current_state(CFS_TASK_UNINT);
-                        cfs_spin_lock(&r0->lo_sub_lock);
-                        if (r0->lo_sub[idx] == los) {
-                                cfs_spin_unlock(&r0->lo_sub_lock);
-                                cfs_waitq_wait(waiter, CFS_TASK_UNINT);
-                        } else {
-                                cfs_spin_unlock(&r0->lo_sub_lock);
+                       spin_lock(&r0->lo_sub_lock);
+                       if (r0->lo_sub[idx] == los) {
+                               spin_unlock(&r0->lo_sub_lock);
+                               cfs_waitq_wait(waiter, CFS_TASK_UNINT);
+                       } else {
+                               spin_unlock(&r0->lo_sub_lock);
                                 cfs_set_current_state(CFS_TASK_RUNNING);
                                 break;
                         }
@@ -476,13 +476,13 @@ const static struct lov_layout_operations lov_dispatch[] = {
 static inline void lov_conf_freeze(struct lov_object *lov)
 {
        if (lov->lo_owner != cfs_current())
-               cfs_down_read(&lov->lo_type_guard);
+               down_read(&lov->lo_type_guard);
 }
 
 static inline void lov_conf_thaw(struct lov_object *lov)
 {
        if (lov->lo_owner != cfs_current())
-               cfs_up_read(&lov->lo_type_guard);
+               up_read(&lov->lo_type_guard);
 }
 
 #define LOV_2DISPATCH_MAYLOCK(obj, op, lock, ...)                       \
@@ -520,7 +520,7 @@ do {                                                                    \
 static void lov_conf_lock(struct lov_object *lov)
 {
        LASSERT(lov->lo_owner != cfs_current());
-       cfs_down_write(&lov->lo_type_guard);
+       down_write(&lov->lo_type_guard);
        LASSERT(lov->lo_owner == NULL);
        lov->lo_owner = cfs_current();
 }
@@ -528,7 +528,7 @@ static void lov_conf_lock(struct lov_object *lov)
 static void lov_conf_unlock(struct lov_object *lov)
 {
        lov->lo_owner = NULL;
-       cfs_up_write(&lov->lo_type_guard);
+       up_write(&lov->lo_type_guard);
 }
 
 static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
@@ -621,7 +621,7 @@ int lov_object_init(const struct lu_env *env, struct lu_object *obj,
         int result;
 
         ENTRY;
-        cfs_init_rwsem(&lov->lo_type_guard);
+       init_rwsem(&lov->lo_type_guard);
        cfs_waitq_init(&lov->lo_waitq);
 
         /* no locking is necessary, as object is being created */
index 9840c2a..8b07dd9 100644
@@ -316,7 +316,7 @@ int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
         }
 
        cfs_atomic_set(&(*lsmp)->lsm_refc, 1);
-        cfs_spin_lock_init(&(*lsmp)->lsm_lock);
+       spin_lock_init(&(*lsmp)->lsm_lock);
         (*lsmp)->lsm_magic = magic;
         (*lsmp)->lsm_stripe_count = stripe_count;
         (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES * stripe_count;
index 4c88202..7c36cec 100644
@@ -186,14 +186,14 @@ static void *pool_proc_next(struct seq_file *s, void *v, loff_t *pos)
 
         /* iterate to find a non empty entry */
         prev_idx = iter->idx;
-        cfs_down_read(&pool_tgt_rw_sem(iter->pool));
+       down_read(&pool_tgt_rw_sem(iter->pool));
         iter->idx++;
         if (iter->idx == pool_tgt_count(iter->pool)) {
                 iter->idx = prev_idx; /* we stay on the last entry */
-                cfs_up_read(&pool_tgt_rw_sem(iter->pool));
+               up_read(&pool_tgt_rw_sem(iter->pool));
                 return NULL;
         }
-        cfs_up_read(&pool_tgt_rw_sem(iter->pool));
+       up_read(&pool_tgt_rw_sem(iter->pool));
         (*pos)++;
         /* return != NULL to continue */
         return iter;
@@ -263,9 +263,9 @@ static int pool_proc_show(struct seq_file *s, void *v)
         LASSERT(iter->pool != NULL);
         LASSERT(iter->idx <= pool_tgt_count(iter->pool));
 
-        cfs_down_read(&pool_tgt_rw_sem(iter->pool));
+       down_read(&pool_tgt_rw_sem(iter->pool));
         tgt = pool_tgt(iter->pool, iter->idx);
-        cfs_up_read(&pool_tgt_rw_sem(iter->pool));
+       up_read(&pool_tgt_rw_sem(iter->pool));
         if (tgt)
                 seq_printf(s, "%s\n", obd_uuid2str(&(tgt->ltd_uuid)));
 
@@ -307,7 +307,7 @@ void lov_dump_pool(int level, struct pool_desc *pool)
 
         CDEBUG(level, "pool "LOV_POOLNAMEF" has %d members\n",
                pool->pool_name, pool->pool_obds.op_count);
-        cfs_down_read(&pool_tgt_rw_sem(pool));
+       down_read(&pool_tgt_rw_sem(pool));
 
         for (i = 0; i < pool_tgt_count(pool) ; i++) {
                 if (!pool_tgt(pool, i) || !(pool_tgt(pool, i))->ltd_exp)
@@ -317,7 +317,7 @@ void lov_dump_pool(int level, struct pool_desc *pool)
                        obd_uuid2str(&((pool_tgt(pool, i))->ltd_uuid)));
         }
 
-        cfs_up_read(&pool_tgt_rw_sem(pool));
+       up_read(&pool_tgt_rw_sem(pool));
         lov_pool_putref(pool);
 }
 
@@ -330,7 +330,7 @@ int lov_ost_pool_init(struct ost_pool *op, unsigned int count)
                 count = LOV_POOL_INIT_COUNT;
         op->op_array = NULL;
         op->op_count = 0;
-        cfs_init_rwsem(&op->op_rw_sem);
+       init_rwsem(&op->op_rw_sem);
         op->op_size = count;
         OBD_ALLOC(op->op_array, op->op_size * sizeof(op->op_array[0]));
         if (op->op_array == NULL) {
@@ -370,7 +370,7 @@ int lov_ost_pool_add(struct ost_pool *op, __u32 idx, unsigned int min_count)
         int rc = 0, i;
         ENTRY;
 
-        cfs_down_write(&op->op_rw_sem);
+       down_write(&op->op_rw_sem);
 
         rc = lov_ost_pool_extend(op, min_count);
         if (rc)
@@ -386,7 +386,7 @@ int lov_ost_pool_add(struct ost_pool *op, __u32 idx, unsigned int min_count)
         op->op_count++;
         EXIT;
 out:
-        cfs_up_write(&op->op_rw_sem);
+       up_write(&op->op_rw_sem);
         return rc;
 }
 
@@ -395,20 +395,20 @@ int lov_ost_pool_remove(struct ost_pool *op, __u32 idx)
         int i;
         ENTRY;
 
-        cfs_down_write(&op->op_rw_sem);
+       down_write(&op->op_rw_sem);
 
         for (i = 0; i < op->op_count; i++) {
                 if (op->op_array[i] == idx) {
                         memmove(&op->op_array[i], &op->op_array[i + 1],
                                 (op->op_count - i - 1) * sizeof(op->op_array[0]));
                         op->op_count--;
-                        cfs_up_write(&op->op_rw_sem);
+                       up_write(&op->op_rw_sem);
                         EXIT;
                         return 0;
                 }
         }
 
-        cfs_up_write(&op->op_rw_sem);
+       up_write(&op->op_rw_sem);
         RETURN(-EINVAL);
 }
 
@@ -419,14 +419,14 @@ int lov_ost_pool_free(struct ost_pool *op)
         if (op->op_size == 0)
                 RETURN(0);
 
-        cfs_down_write(&op->op_rw_sem);
+       down_write(&op->op_rw_sem);
 
         OBD_FREE(op->op_array, op->op_size * sizeof(op->op_array[0]));
         op->op_array = NULL;
         op->op_count = 0;
         op->op_size = 0;
 
-        cfs_up_write(&op->op_rw_sem);
+       up_write(&op->op_rw_sem);
         RETURN(0);
 }
 
@@ -481,10 +481,10 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
         CDEBUG(D_INFO, "pool %p - proc %p\n", new_pool, new_pool->pool_proc_entry);
 #endif
 
-        cfs_spin_lock(&obd->obd_dev_lock);
-        cfs_list_add_tail(&new_pool->pool_list, &lov->lov_pool_list);
-        lov->lov_pool_count++;
-        cfs_spin_unlock(&obd->obd_dev_lock);
+       spin_lock(&obd->obd_dev_lock);
+       cfs_list_add_tail(&new_pool->pool_list, &lov->lov_pool_list);
+       lov->lov_pool_count++;
+       spin_unlock(&obd->obd_dev_lock);
 
         /* add to the pool hash only when it is fully ready */
         rc = cfs_hash_add_unique(lov->lov_pools_hash_body, poolname,
@@ -498,10 +498,10 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
         RETURN(0);
 
 out_err:
-        cfs_spin_lock(&obd->obd_dev_lock);
-        cfs_list_del_init(&new_pool->pool_list);
-        lov->lov_pool_count--;
-        cfs_spin_unlock(&obd->obd_dev_lock);
+       spin_lock(&obd->obd_dev_lock);
+       cfs_list_del_init(&new_pool->pool_list);
+       lov->lov_pool_count--;
+       spin_unlock(&obd->obd_dev_lock);
 
         lprocfs_remove(&new_pool->pool_proc_entry);
 
@@ -531,15 +531,15 @@ int lov_pool_del(struct obd_device *obd, char *poolname)
                 lov_pool_putref(pool);
         }
 
-        cfs_spin_lock(&obd->obd_dev_lock);
-        cfs_list_del_init(&pool->pool_list);
-        lov->lov_pool_count--;
-        cfs_spin_unlock(&obd->obd_dev_lock);
+       spin_lock(&obd->obd_dev_lock);
+       cfs_list_del_init(&pool->pool_list);
+       lov->lov_pool_count--;
+       spin_unlock(&obd->obd_dev_lock);
 
-        /* release last reference */
-        lov_pool_putref(pool);
+       /* release last reference */
+       lov_pool_putref(pool);
 
-        RETURN(0);
+       RETURN(0);
 }
 
 
@@ -647,7 +647,7 @@ int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool)
          */
         lov_pool_getref(pool);
 
-        cfs_down_read(&pool_tgt_rw_sem(pool));
+       down_read(&pool_tgt_rw_sem(pool));
 
         for (i = 0; i < pool_tgt_count(pool); i++) {
                 if (pool_tgt_array(pool)[i] == idx)
@@ -656,7 +656,7 @@ int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool)
         rc = -ENOENT;
         EXIT;
 out:
-        cfs_up_read(&pool_tgt_rw_sem(pool));
+       up_read(&pool_tgt_rw_sem(pool));
 
         lov_pool_putref(pool);
         return rc;
index c972437..a6c537d 100644
@@ -58,7 +58,7 @@ static void lov_init_set(struct lov_request_set *set)
        CFS_INIT_LIST_HEAD(&set->set_list);
        cfs_atomic_set(&set->set_refcount, 1);
        cfs_waitq_init(&set->set_waitq);
-       cfs_spin_lock_init(&set->set_lock);
+       spin_lock_init(&set->set_lock);
 }
 
 void lov_finish_set(struct lov_request_set *set)
@@ -1320,14 +1320,14 @@ int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs,int success)
                if (osfs->os_ffree != LOV_U64_MAX)
                        lov_do_div64(osfs->os_ffree, expected_stripes);
 
-                cfs_spin_lock(&obd->obd_osfs_lock);
-                memcpy(&obd->obd_osfs, osfs, sizeof(*osfs));
-                obd->obd_osfs_age = cfs_time_current_64();
-                cfs_spin_unlock(&obd->obd_osfs_lock);
-                RETURN(0);
-        }
+               spin_lock(&obd->obd_osfs_lock);
+               memcpy(&obd->obd_osfs, osfs, sizeof(*osfs));
+               obd->obd_osfs_age = cfs_time_current_64();
+               spin_unlock(&obd->obd_osfs_lock);
+               RETURN(0);
+       }
 
-        RETURN(-EIO);
+       RETURN(-EIO);
 }
 
 int lov_fini_statfs_set(struct lov_request_set *set)
@@ -1447,11 +1447,11 @@ static int cb_statfs_update(void *cookie, int rc)
                 GOTO(out_update, rc);
 
         tgtobd = class_exp2obd(tgt->ltd_exp);
-        cfs_spin_lock(&tgtobd->obd_osfs_lock);
-        memcpy(&tgtobd->obd_osfs, lov_sfs, sizeof(*lov_sfs));
-        if ((oinfo->oi_flags & OBD_STATFS_FROM_CACHE) == 0)
-                tgtobd->obd_osfs_age = cfs_time_current_64();
-        cfs_spin_unlock(&tgtobd->obd_osfs_lock);
+       spin_lock(&tgtobd->obd_osfs_lock);
+       memcpy(&tgtobd->obd_osfs, lov_sfs, sizeof(*lov_sfs));
+       if ((oinfo->oi_flags & OBD_STATFS_FROM_CACHE) == 0)
+               tgtobd->obd_osfs_age = cfs_time_current_64();
+       spin_unlock(&tgtobd->obd_osfs_lock);
 
 out_update:
         lov_update_statfs(osfs, lov_sfs, success);
index fd3fd67..bc91e6d 100644
@@ -83,9 +83,9 @@ static void lovsub_object_free(const struct lu_env *env, struct lu_object *obj)
         if (lov) {
                 LASSERT(lov->lo_type == LLT_RAID0);
                 LASSERT(lov->u.raid0.lo_sub[los->lso_index] == los);
-                cfs_spin_lock(&lov->u.raid0.lo_sub_lock);
-                lov->u.raid0.lo_sub[los->lso_index] = NULL;
-                cfs_spin_unlock(&lov->u.raid0.lo_sub_lock);
+               spin_lock(&lov->u.raid0.lo_sub_lock);
+               lov->u.raid0.lo_sub[los->lso_index] = NULL;
+               spin_unlock(&lov->u.raid0.lo_sub_lock);
         }
 
         lu_object_fini(obj);
index 4695daf..02df18f 100644
@@ -1160,7 +1160,7 @@ int fsfilt_ext3_map_bm_inode_pages(struct inode *inode, struct page **page,
 int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
                                 int pages, unsigned long *blocks,
                                 int *created, int create,
-                                cfs_mutex_t *optional_mutex)
+                               struct mutex *optional_mutex)
 {
         int rc;
 
@@ -1170,11 +1170,11 @@ int fsfilt_ext3_map_inode_pages(struct inode *inode, struct page **page,
                 return rc;
         }
         if (optional_mutex != NULL)
-                cfs_mutex_lock(optional_mutex);
+               mutex_lock(optional_mutex);
         rc = fsfilt_ext3_map_bm_inode_pages(inode, page, pages, blocks,
                                             created, create);
         if (optional_mutex != NULL)
-                cfs_mutex_unlock(optional_mutex);
+               mutex_unlock(optional_mutex);
 
         return rc;
 }
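
fsfilt_ext3_map_inode_pages() keeps its optional-lock convention across the rename: callers pass a struct mutex to serialize the mapping, or NULL when they already hold a suitable lock. A sketch of the convention with a hypothetical worker:

#include <linux/mutex.h>

static int example_mapped_work(struct mutex *optional_mutex)
{
        int rc = 0;

        if (optional_mutex != NULL)
                mutex_lock(optional_mutex);     /* was cfs_mutex_lock() */
        /* ... the block-mapping work would go here ... */
        if (optional_mutex != NULL)
                mutex_unlock(optional_mutex);   /* was cfs_mutex_unlock() */
        return rc;
}
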
@@ -1187,8 +1187,8 @@ int fsfilt_ext3_read(struct inode *inode, void *buf, int size, loff_t *offs)
 
         /* prevent reading after eof */
        spin_lock(&inode->i_lock);
-        if (i_size_read(inode) < *offs + size) {
-                size = i_size_read(inode) - *offs;
+       if (i_size_read(inode) < *offs + size) {
+               size = i_size_read(inode) - *offs;
                spin_unlock(&inode->i_lock);
                 if (size < 0) {
                         CDEBUG(D_EXT2, "size %llu is too short for read @%llu\n",
@@ -1282,14 +1282,14 @@ int fsfilt_ext3_write_handle(struct inode *inode, void *buf, int bufsize,
         /* correct in-core and on-disk sizes */
         if (new_size > i_size_read(inode)) {
                spin_lock(&inode->i_lock);
-                if (new_size > i_size_read(inode))
-                        i_size_write(inode, new_size);
-                if (i_size_read(inode) > EXT3_I(inode)->i_disksize)
-                        EXT3_I(inode)->i_disksize = i_size_read(inode);
-                if (i_size_read(inode) > old_size) {
+               if (new_size > i_size_read(inode))
+                       i_size_write(inode, new_size);
+               if (i_size_read(inode) > EXT3_I(inode)->i_disksize)
+                       EXT3_I(inode)->i_disksize = i_size_read(inode);
+               if (i_size_read(inode) > old_size) {
                        spin_unlock(&inode->i_lock);
-                        mark_inode_dirty(inode);
-                } else {
+                       mark_inode_dirty(inode);
+               } else {
                        spin_unlock(&inode->i_lock);
                 }
         }
index 73bd8a4..19c597c 100644
@@ -170,16 +170,15 @@ int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int idx)
                rc = 0;
                if (unlikely(stats->ls_biggest_alloc_num <= idx)) {
                        if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
-                               cfs_spin_lock_irqsave(&stats->ls_lock, flags);
+                               spin_lock_irqsave(&stats->ls_lock, flags);
                        else
-                               cfs_spin_lock(&stats->ls_lock);
+                               spin_lock(&stats->ls_lock);
                        if (stats->ls_biggest_alloc_num <= idx)
                                stats->ls_biggest_alloc_num = idx + 1;
                        if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) {
-                               cfs_spin_unlock_irqrestore(&stats->ls_lock,
-                                                          flags);
+                               spin_unlock_irqrestore(&stats->ls_lock, flags);
                        } else {
-                               cfs_spin_unlock(&stats->ls_lock);
+                               spin_unlock(&stats->ls_lock);
                        }
                }
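
The hunk above picks between the plain and the irq-saving lock variants at runtime. When a lock can be taken from interrupt context, spin_lock_irqsave() must be used so local interrupt state is saved and restored; otherwise the cheaper spin_lock() suffices. A sketch of both, with hypothetical names:

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(example_lock);
static int example_counter;

static void example_bump(bool irq_safe)
{
        unsigned long flags;

        if (irq_safe) {
                spin_lock_irqsave(&example_lock, flags);
                example_counter++;
                spin_unlock_irqrestore(&example_lock, flags);
        } else {
                spin_lock(&example_lock);
                example_counter++;
                spin_unlock(&example_lock);
        }
}
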
 
index 163827a..5f1274c 100644
@@ -542,42 +542,42 @@ EXPORT_SYMBOL(lvfs_check_io_health);
 
 void obd_update_maxusage()
 {
-        __u64 max1, max2;
+       __u64 max1, max2;
 
-        max1 = obd_pages_sum();
-        max2 = obd_memory_sum();
+       max1 = obd_pages_sum();
+       max2 = obd_memory_sum();
 
-        cfs_spin_lock(&obd_updatemax_lock);
-        if (max1 > obd_max_pages)
-                obd_max_pages = max1;
-        if (max2 > obd_max_alloc)
-                obd_max_alloc = max2;
-        cfs_spin_unlock(&obd_updatemax_lock);
+       spin_lock(&obd_updatemax_lock);
+       if (max1 > obd_max_pages)
+               obd_max_pages = max1;
+       if (max2 > obd_max_alloc)
+               obd_max_alloc = max2;
+       spin_unlock(&obd_updatemax_lock);
 
 }
 EXPORT_SYMBOL(obd_update_maxusage);
 
 __u64 obd_memory_max(void)
 {
-        __u64 ret;
+       __u64 ret;
 
-        cfs_spin_lock(&obd_updatemax_lock);
-        ret = obd_max_alloc;
-        cfs_spin_unlock(&obd_updatemax_lock);
+       spin_lock(&obd_updatemax_lock);
+       ret = obd_max_alloc;
+       spin_unlock(&obd_updatemax_lock);
 
-        return ret;
+       return ret;
 }
 EXPORT_SYMBOL(obd_memory_max);
 
 __u64 obd_pages_max(void)
 {
-        __u64 ret;
+       __u64 ret;
 
-        cfs_spin_lock(&obd_updatemax_lock);
-        ret = obd_max_pages;
-        cfs_spin_unlock(&obd_updatemax_lock);
+       spin_lock(&obd_updatemax_lock);
+       ret = obd_max_pages;
+       spin_unlock(&obd_updatemax_lock);
 
-        return ret;
+       return ret;
 }
 EXPORT_SYMBOL(obd_pages_max);
 
index 925c2e5..5226a05 100644
@@ -225,11 +225,11 @@ int mdc_find_cbdata(struct obd_export *exp,
 
 static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc)
 {
-        /* Don't hold error requests for replay. */
-        if (req->rq_replay) {
-                cfs_spin_lock(&req->rq_lock);
-                req->rq_replay = 0;
-                cfs_spin_unlock(&req->rq_lock);
+       /* Don't hold error requests for replay. */
+       if (req->rq_replay) {
+               spin_lock(&req->rq_lock);
+               req->rq_replay = 0;
+               spin_unlock(&req->rq_lock);
         }
         if (rc && req->rq_transno != 0) {
                 DEBUG_REQ(D_ERROR, req, "transno returned on error rc %d", rc);
@@ -330,9 +330,9 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
                 return NULL;
         }
 
-        cfs_spin_lock(&req->rq_lock);
-        req->rq_replay = req->rq_import->imp_replayable;
-        cfs_spin_unlock(&req->rq_lock);
+       spin_lock(&req->rq_lock);
+       req->rq_replay = req->rq_import->imp_replayable;
+       spin_unlock(&req->rq_lock);
 
         /* pack the intent */
         lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
index 4121826..fa30f9d 100644
@@ -692,11 +692,11 @@ void mdc_commit_open(struct ptlrpc_request *req)
          * be put along with freeing \var mod.
          */
         ptlrpc_request_addref(req);
-        cfs_spin_lock(&req->rq_lock);
-        req->rq_committed = 1;
-        cfs_spin_unlock(&req->rq_lock);
-        req->rq_cb_data = NULL;
-        obd_mod_put(mod);
+       spin_lock(&req->rq_lock);
+       req->rq_committed = 1;
+       spin_unlock(&req->rq_lock);
+       req->rq_cb_data = NULL;
+       obd_mod_put(mod);
 }
 
 int mdc_set_open_replay_data(struct obd_export *exp,
@@ -737,13 +737,13 @@ int mdc_set_open_replay_data(struct obd_export *exp,
                 obd_mod_get(mod);
                 obd_mod_get(mod);
 
-                cfs_spin_lock(&open_req->rq_lock);
-                och->och_mod = mod;
-                mod->mod_och = och;
-                mod->mod_open_req = open_req;
-                open_req->rq_cb_data = mod;
-                open_req->rq_commit_cb = mdc_commit_open;
-                cfs_spin_unlock(&open_req->rq_lock);
+               spin_lock(&open_req->rq_lock);
+               och->och_mod = mod;
+               mod->mod_och = och;
+               mod->mod_open_req = open_req;
+               open_req->rq_cb_data = mod;
+               open_req->rq_commit_cb = mdc_commit_open;
+               spin_unlock(&open_req->rq_lock);
         }
 
         rec->cr_fid2 = body->fid1;
@@ -836,9 +836,9 @@ int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
                 DEBUG_REQ(D_HA, mod->mod_open_req, "matched open");
                 /* We no longer want to preserve this open for replay even
                  * though the open was committed. b=3632, b=3633 */
-                cfs_spin_lock(&mod->mod_open_req->rq_lock);
-                mod->mod_open_req->rq_replay = 0;
-                cfs_spin_unlock(&mod->mod_open_req->rq_lock);
+               spin_lock(&mod->mod_open_req->rq_lock);
+               mod->mod_open_req->rq_replay = 0;
+               spin_unlock(&mod->mod_open_req->rq_lock);
         } else {
                  CDEBUG(D_HA, "couldn't find open req; expecting close error\n");
         }
@@ -929,9 +929,9 @@ int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
                 DEBUG_REQ(D_HA, mod->mod_open_req, "matched setattr");
                 /* We no longer want to preserve this setattr for replay even
                  * though the open was committed. b=3632, b=3633 */
-                cfs_spin_lock(&mod->mod_open_req->rq_lock);
-                mod->mod_open_req->rq_replay = 0;
-                cfs_spin_unlock(&mod->mod_open_req->rq_lock);
+               spin_lock(&mod->mod_open_req->rq_lock);
+               mod->mod_open_req->rq_replay = 0;
+               spin_unlock(&mod->mod_open_req->rq_lock);
         }
 
         mdc_close_pack(req, op_data);
@@ -1111,10 +1111,10 @@ static int mdc_statfs(const struct lu_env *env,
          * Since the request might also come from lprocfs, so we need
          * sync this with client_disconnect_export Bug15684
          */
-        cfs_down_read(&obd->u.cli.cl_sem);
+       down_read(&obd->u.cli.cl_sem);
         if (obd->u.cli.cl_import)
                 imp = class_import_get(obd->u.cli.cl_import);
-        cfs_up_read(&obd->u.cli.cl_sem);
+       up_read(&obd->u.cli.cl_sem);
         if (!imp)
                 RETURN(-ENODEV);
 
@@ -1735,15 +1735,17 @@ int mdc_set_info_async(const struct lu_env *env,
                 if (vallen != sizeof(int))
                         RETURN(-EINVAL);
 
-                cfs_spin_lock(&imp->imp_lock);
-                if (*((int *)val)) {
-                        imp->imp_connect_flags_orig |= OBD_CONNECT_RDONLY;
-                        imp->imp_connect_data.ocd_connect_flags |= OBD_CONNECT_RDONLY;
-                } else {
-                        imp->imp_connect_flags_orig &= ~OBD_CONNECT_RDONLY;
-                        imp->imp_connect_data.ocd_connect_flags &= ~OBD_CONNECT_RDONLY;
-                }
-                cfs_spin_unlock(&imp->imp_lock);
+               spin_lock(&imp->imp_lock);
+               if (*((int *)val)) {
+                       imp->imp_connect_flags_orig |= OBD_CONNECT_RDONLY;
+                       imp->imp_connect_data.ocd_connect_flags |=
+                                                       OBD_CONNECT_RDONLY;
+               } else {
+                       imp->imp_connect_flags_orig &= ~OBD_CONNECT_RDONLY;
+                       imp->imp_connect_data.ocd_connect_flags &=
+                                                       ~OBD_CONNECT_RDONLY;
+               }
+               spin_unlock(&imp->imp_lock);
 
                 rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
                                        keylen, key, vallen, val, set);
@@ -1759,9 +1761,9 @@ int mdc_set_info_async(const struct lu_env *env,
         }
         if (KEY_IS(KEY_MDS_CONN)) {
                 /* mds-mds import */
-                cfs_spin_lock(&imp->imp_lock);
-                imp->imp_server_timeout = 1;
-                cfs_spin_unlock(&imp->imp_lock);
+               spin_lock(&imp->imp_lock);
+               imp->imp_server_timeout = 1;
+               spin_unlock(&imp->imp_lock);
                 imp->imp_client->cli_request_portal = MDS_MDS_PORTAL;
                 CDEBUG(D_OTHER, "%s: timeout / 2\n", exp->exp_obd->obd_name);
                 RETURN(0);
@@ -2335,13 +2337,13 @@ static int mdc_connect(const struct lu_env *env,
                        struct obd_connect_data *data,
                        void *localdata)
 {
-        struct obd_import *imp = obd->u.cli.cl_import;
+       struct obd_import *imp = obd->u.cli.cl_import;
 
-        /* mds-mds import features */
-        if (data && (data->ocd_connect_flags & OBD_CONNECT_MDS_MDS)) {
-                cfs_spin_lock(&imp->imp_lock);
-                imp->imp_server_timeout = 1;
-                cfs_spin_unlock(&imp->imp_lock);
+       /* mds-mds import features */
+       if (data && (data->ocd_connect_flags & OBD_CONNECT_MDS_MDS)) {
+               spin_lock(&imp->imp_lock);
+               imp->imp_server_timeout = 1;
+               spin_unlock(&imp->imp_lock);
                 imp->imp_client->cli_request_portal = MDS_MDS_PORTAL;
                 CDEBUG(D_OTHER, "%s: Set 'mds' portal and timeout\n",
                        obd->obd_name);
index 416d6e5..17000c1 100644
@@ -221,11 +221,11 @@ static int changelog_user_init_cb(const struct lu_env *env,
                " in log "LPX64"\n", hdr->lrh_index, rec->cur_hdr.lrh_index,
                rec->cur_id, rec->cur_endrec, llh->lgh_id.lgl_oid);
 
-        cfs_spin_lock(&mdd->mdd_cl.mc_user_lock);
-        mdd->mdd_cl.mc_lastuser = rec->cur_id;
-        cfs_spin_unlock(&mdd->mdd_cl.mc_user_lock);
+       spin_lock(&mdd->mdd_cl.mc_user_lock);
+       mdd->mdd_cl.mc_lastuser = rec->cur_id;
+       spin_unlock(&mdd->mdd_cl.mc_user_lock);
 
-        return LLOG_PROC_BREAK;
+       return LLOG_PROC_BREAK;
 }
 
 static int llog_changelog_cancel_cb(const struct lu_env *env,
@@ -397,11 +397,11 @@ static int mdd_changelog_init(const struct lu_env *env, struct mdd_device *mdd)
        int                      rc;
 
        mdd->mdd_cl.mc_index = 0;
-       cfs_spin_lock_init(&mdd->mdd_cl.mc_lock);
+       spin_lock_init(&mdd->mdd_cl.mc_lock);
        mdd->mdd_cl.mc_starttime = cfs_time_current_64();
        mdd->mdd_cl.mc_flags = 0; /* off by default */
        mdd->mdd_cl.mc_mask = CHANGELOG_DEFMASK;
-       cfs_spin_lock_init(&mdd->mdd_cl.mc_user_lock);
+       spin_lock_init(&mdd->mdd_cl.mc_user_lock);
        mdd->mdd_cl.mc_lastuser = 0;
 
        rc = mdd_changelog_llog_init(env, mdd);
@@ -450,19 +450,19 @@ int mdd_changelog_on(const struct lu_env *env, struct mdd_device *mdd, int on)
                                mdd2obd_dev(mdd)->obd_name);
                         rc = -ESRCH;
                 } else {
-                        cfs_spin_lock(&mdd->mdd_cl.mc_lock);
-                        mdd->mdd_cl.mc_flags |= CLM_ON;
-                        cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
+                       spin_lock(&mdd->mdd_cl.mc_lock);
+                       mdd->mdd_cl.mc_flags |= CLM_ON;
+                       spin_unlock(&mdd->mdd_cl.mc_lock);
                        rc = mdd_changelog_write_header(env, mdd, CLM_START);
-                }
-        } else if ((on == 0) && ((mdd->mdd_cl.mc_flags & CLM_ON) == CLM_ON)) {
-                LCONSOLE_INFO("%s: changelog off\n",mdd2obd_dev(mdd)->obd_name);
+               }
+       } else if ((on == 0) && ((mdd->mdd_cl.mc_flags & CLM_ON) == CLM_ON)) {
+               LCONSOLE_INFO("%s: changelog off\n",mdd2obd_dev(mdd)->obd_name);
                rc = mdd_changelog_write_header(env, mdd, CLM_FINI);
-                cfs_spin_lock(&mdd->mdd_cl.mc_lock);
-                mdd->mdd_cl.mc_flags &= ~CLM_ON;
-                cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
-        }
-        return rc;
+               spin_lock(&mdd->mdd_cl.mc_lock);
+               mdd->mdd_cl.mc_flags &= ~CLM_ON;
+               spin_unlock(&mdd->mdd_cl.mc_lock);
+       }
+       return rc;
 }
 
 /** Remove entries with indices up to and including \a endrec from the
@@ -483,9 +483,9 @@ int mdd_changelog_llog_cancel(const struct lu_env *env,
         if (ctxt == NULL)
                 return -ENXIO;
 
-        cfs_spin_lock(&mdd->mdd_cl.mc_lock);
-        cur = (long long)mdd->mdd_cl.mc_index;
-        cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
+       spin_lock(&mdd->mdd_cl.mc_lock);
+       cur = (long long)mdd->mdd_cl.mc_index;
+       spin_unlock(&mdd->mdd_cl.mc_lock);
         if (endrec > cur)
                 endrec = cur;
 
@@ -553,9 +553,9 @@ int mdd_changelog_write_header(const struct lu_env *env,
        rec->cr_hdr.lrh_len = llog_data_len(sizeof(*rec) + rec->cr.cr_namelen);
        rec->cr_hdr.lrh_type = CHANGELOG_REC;
        rec->cr.cr_time = cl_time();
-       cfs_spin_lock(&mdd->mdd_cl.mc_lock);
+       spin_lock(&mdd->mdd_cl.mc_lock);
        rec->cr.cr_index = ++mdd->mdd_cl.mc_index;
-       cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
+       spin_unlock(&mdd->mdd_cl.mc_lock);
 
        ctxt = llog_get_context(obd, LLOG_CHANGELOG_ORIG_CTXT);
        LASSERT(ctxt);
@@ -1459,15 +1459,15 @@ static int mdd_changelog_user_register(const struct lu_env *env,
 
         rec->cur_hdr.lrh_len = sizeof(*rec);
         rec->cur_hdr.lrh_type = CHANGELOG_USER_REC;
-        cfs_spin_lock(&mdd->mdd_cl.mc_user_lock);
-        if (mdd->mdd_cl.mc_lastuser == (unsigned int)(-1)) {
-                cfs_spin_unlock(&mdd->mdd_cl.mc_user_lock);
-                CERROR("Maximum number of changelog users exceeded!\n");
-                GOTO(out, rc = -EOVERFLOW);
-        }
-        *id = rec->cur_id = ++mdd->mdd_cl.mc_lastuser;
-        rec->cur_endrec = mdd->mdd_cl.mc_index;
-        cfs_spin_unlock(&mdd->mdd_cl.mc_user_lock);
+       spin_lock(&mdd->mdd_cl.mc_user_lock);
+       if (mdd->mdd_cl.mc_lastuser == (unsigned int)(-1)) {
+               spin_unlock(&mdd->mdd_cl.mc_user_lock);
+               CERROR("Maximum number of changelog users exceeded!\n");
+               GOTO(out, rc = -EOVERFLOW);
+       }
+       *id = rec->cur_id = ++mdd->mdd_cl.mc_lastuser;
+       rec->cur_endrec = mdd->mdd_cl.mc_index;
+       spin_unlock(&mdd->mdd_cl.mc_user_lock);
 
        rc = llog_cat_add(env, ctxt->loc_handle, &rec->cur_hdr, NULL, NULL);
 
@@ -1568,9 +1568,9 @@ static int mdd_changelog_user_purge(const struct lu_env *env,
         data.mcud_minrec = 0;
         data.mcud_usercount = 0;
         data.mcud_endrec = endrec;
-        cfs_spin_lock(&mdd->mdd_cl.mc_lock);
-        endrec = mdd->mdd_cl.mc_index;
-        cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
+       spin_lock(&mdd->mdd_cl.mc_lock);
+       endrec = mdd->mdd_cl.mc_index;
+       spin_unlock(&mdd->mdd_cl.mc_lock);
         if ((data.mcud_endrec == 0) ||
             ((data.mcud_endrec > endrec) &&
              (data.mcud_endrec != MCUD_UNREGISTER)))
index b4994b4..ab5247b 100644
@@ -703,12 +703,12 @@ int mdd_changelog_store(const struct lu_env *env, struct mdd_device *mdd,
        rec->cr_hdr.lrh_type = CHANGELOG_REC;
        rec->cr.cr_time = cl_time();
 
-       cfs_spin_lock(&mdd->mdd_cl.mc_lock);
+       spin_lock(&mdd->mdd_cl.mc_lock);
        /* NB: I suppose it's possible llog_add adds out of order wrt cr_index,
         * but as long as the MDD transactions are ordered correctly for e.g.
         * rename conflicts, I don't think this should matter. */
        rec->cr.cr_index = ++mdd->mdd_cl.mc_index;
-       cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
+       spin_unlock(&mdd->mdd_cl.mc_lock);
 
        ctxt = llog_get_context(obd, LLOG_CHANGELOG_ORIG_CTXT);
        if (ctxt == NULL)
@@ -741,12 +741,12 @@ int mdd_changelog_ext_store(const struct lu_env *env, struct mdd_device *mdd,
        rec->cr_hdr.lrh_type = CHANGELOG_REC;
        rec->cr.cr_time = cl_time();
 
-       cfs_spin_lock(&mdd->mdd_cl.mc_lock);
+       spin_lock(&mdd->mdd_cl.mc_lock);
        /* NB: I suppose it's possible llog_add adds out of order wrt cr_index,
         * but as long as the MDD transactions are ordered correctly for e.g.
         * rename conflicts, I don't think this should matter. */
        rec->cr.cr_index = ++mdd->mdd_cl.mc_index;
-       cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
+       spin_unlock(&mdd->mdd_cl.mc_lock);
 
        ctxt = llog_get_context(obd, LLOG_CHANGELOG_ORIG_CTXT);
        if (ctxt == NULL)
@@ -1867,7 +1867,7 @@ out_stop:
 out_free:
         /* The child object shouldn't be cached anymore */
         if (rc)
-                cfs_set_bit(LU_OBJECT_HEARD_BANSHEE,
+               set_bit(LU_OBJECT_HEARD_BANSHEE,
                             &child->mo_lu.lo_header->loh_flags);
         return rc;
 }
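
set_bit() above is the kernel's atomic bit operation on a flags word and needs no extra locking. A short sketch with a hypothetical flag bit:

#include <linux/bitops.h>
#include <linux/printk.h>

#define EXAMPLE_DYING_BIT 0             /* hypothetical bit number */

static unsigned long example_flags;

static void example_mark_dying(void)
{
        set_bit(EXAMPLE_DYING_BIT, &example_flags);     /* was cfs_set_bit() */
        if (test_bit(EXAMPLE_DYING_BIT, &example_flags))
                pr_debug("object will not be cached\n");
}
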
index 5351615..ce3898b 100644
 #define CLM_PURGE 0x40000
 
 struct mdd_changelog {
-        cfs_spinlock_t                   mc_lock;    /* for index */
-        int                              mc_flags;
-        int                              mc_mask;
-        __u64                            mc_index;
-        __u64                            mc_starttime;
-        cfs_spinlock_t                   mc_user_lock;
-        int                              mc_lastuser;
+       spinlock_t              mc_lock;        /* for index */
+       int                     mc_flags;
+       int                     mc_mask;
+       __u64                   mc_index;
+       __u64                   mc_starttime;
+       spinlock_t              mc_user_lock;
+       int                     mc_lastuser;
 };
 
 static inline __u64 cl_time(void) {
@@ -94,8 +94,8 @@ struct mdd_dot_lustre_objs {
 extern const char lfsck_bookmark_name[];
 
 struct md_lfsck {
-       cfs_mutex_t           ml_mutex;
-       cfs_spinlock_t        ml_lock;
+       struct mutex          ml_mutex;
+       spinlock_t            ml_lock;
        struct ptlrpc_thread  ml_thread;
        struct dt_object     *ml_bookmark_obj;
        struct dt_object     *ml_it_obj;
index 31a34cc..16842fd 100644
@@ -53,7 +53,7 @@ static inline char *mdd_lfsck2name(struct md_lfsck *lfsck)
 
 void mdd_lfsck_set_speed(struct md_lfsck *lfsck, __u32 limit)
 {
-       cfs_spin_lock(&lfsck->ml_lock);
+       spin_lock(&lfsck->ml_lock);
        lfsck->ml_speed_limit = limit;
        if (limit != LFSCK_SPEED_NO_LIMIT) {
                if (limit > CFS_HZ) {
@@ -67,7 +67,7 @@ void mdd_lfsck_set_speed(struct md_lfsck *lfsck, __u32 limit)
                lfsck->ml_sleep_jif = 0;
                lfsck->ml_sleep_rate = 0;
        }
-       cfs_spin_unlock(&lfsck->ml_lock);
+       spin_unlock(&lfsck->ml_lock);
 }
 
 static void mdd_lfsck_control_speed(struct md_lfsck *lfsck)
@@ -77,19 +77,19 @@ static void mdd_lfsck_control_speed(struct md_lfsck *lfsck)
 
        if (lfsck->ml_sleep_jif > 0 &&
            lfsck->ml_new_scanned >= lfsck->ml_sleep_rate) {
-               cfs_spin_lock(&lfsck->ml_lock);
+               spin_lock(&lfsck->ml_lock);
                if (likely(lfsck->ml_sleep_jif > 0 &&
                           lfsck->ml_new_scanned >= lfsck->ml_sleep_rate)) {
                        lwi = LWI_TIMEOUT_INTR(lfsck->ml_sleep_jif, NULL,
                                               LWI_ON_SIGNAL_NOOP, NULL);
-                       cfs_spin_unlock(&lfsck->ml_lock);
+                       spin_unlock(&lfsck->ml_lock);
 
                        l_wait_event(thread->t_ctl_waitq,
                                     !thread_is_running(thread),
                                     &lwi);
                        lfsck->ml_new_scanned = 0;
                } else {
-                       cfs_spin_unlock(&lfsck->ml_lock);
+                       spin_unlock(&lfsck->ml_lock);
                }
        }
 }
@@ -130,9 +130,9 @@ static int mdd_lfsck_main(void *args)
         *      every bookmark, then low layer module can decide the
         *      start point for current iteration. */
 
-       cfs_spin_lock(&lfsck->ml_lock);
+       spin_lock(&lfsck->ml_lock);
        thread_set_flags(thread, SVC_RUNNING);
-       cfs_spin_unlock(&lfsck->ml_lock);
+       spin_unlock(&lfsck->ml_lock);
        cfs_waitq_broadcast(&thread->t_ctl_waitq);
 
        /* Call iops->load() to finish the choosing start point. */
@@ -195,10 +195,10 @@ fini_env:
        lu_env_fini(&env);
 
 noenv:
-       cfs_spin_lock(&lfsck->ml_lock);
+       spin_lock(&lfsck->ml_lock);
        thread_set_flags(thread, SVC_STOPPED);
        cfs_waitq_broadcast(&thread->t_ctl_waitq);
-       cfs_spin_unlock(&lfsck->ml_lock);
+       spin_unlock(&lfsck->ml_lock);
        return rc;
 }
 
@@ -215,15 +215,15 @@ int mdd_lfsck_start(const struct lu_env *env, struct md_lfsck *lfsck,
        if (lfsck->ml_it_obj == NULL)
                RETURN(-ENOTSUPP);
 
-       cfs_mutex_lock(&lfsck->ml_mutex);
-       cfs_spin_lock(&lfsck->ml_lock);
+       mutex_lock(&lfsck->ml_mutex);
+       spin_lock(&lfsck->ml_lock);
        if (thread_is_running(thread)) {
-               cfs_spin_unlock(&lfsck->ml_lock);
-               cfs_mutex_unlock(&lfsck->ml_mutex);
+               spin_unlock(&lfsck->ml_lock);
+               mutex_unlock(&lfsck->ml_mutex);
                RETURN(-EALREADY);
        }
 
-       cfs_spin_unlock(&lfsck->ml_lock);
+       spin_unlock(&lfsck->ml_lock);
        if (start->ls_valid & LSV_SPEED_LIMIT)
                mdd_lfsck_set_speed(lfsck, start->ls_speed_limit);
 
@@ -255,7 +255,7 @@ int mdd_lfsck_start(const struct lu_env *env, struct md_lfsck *lfsck,
                             thread_is_running(thread) ||
                             thread_is_stopped(thread),
                             &lwi);
-       cfs_mutex_unlock(&lfsck->ml_mutex);
+       mutex_unlock(&lfsck->ml_mutex);
 
        RETURN(rc < 0 ? rc : 0);
 }
@@ -266,22 +266,22 @@ int mdd_lfsck_stop(const struct lu_env *env, struct md_lfsck *lfsck)
        struct l_wait_info    lwi    = { 0 };
        ENTRY;
 
-       cfs_mutex_lock(&lfsck->ml_mutex);
-       cfs_spin_lock(&lfsck->ml_lock);
+       mutex_lock(&lfsck->ml_mutex);
+       spin_lock(&lfsck->ml_lock);
        if (thread_is_init(thread) || thread_is_stopped(thread)) {
-               cfs_spin_unlock(&lfsck->ml_lock);
-               cfs_mutex_unlock(&lfsck->ml_mutex);
+               spin_unlock(&lfsck->ml_lock);
+               mutex_unlock(&lfsck->ml_mutex);
                RETURN(-EALREADY);
        }
 
        thread_set_flags(thread, SVC_STOPPING);
-       cfs_spin_unlock(&lfsck->ml_lock);
+       spin_unlock(&lfsck->ml_lock);
 
        cfs_waitq_broadcast(&thread->t_ctl_waitq);
        l_wait_event(thread->t_ctl_waitq,
                     thread_is_stopped(thread),
                     &lwi);
-       cfs_mutex_unlock(&lfsck->ml_mutex);
+       mutex_unlock(&lfsck->ml_mutex);
 
        RETURN(0);
 }
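Both start and stop above take ml_mutex before ml_lock. A sketch of that two-level ordering with hypothetical names (the mutex serializes whole start/stop operations, the spinlock guards the flag word that the worker thread also reads):

        #include <linux/errno.h>
        #include <linux/mutex.h>
        #include <linux/spinlock.h>

        enum scan_flags { SCAN_RUNNING = 1, SCAN_STOPPING = 2, SCAN_STOPPED = 4 };

        struct scanner {
                struct mutex    sc_mutex;       /* serializes start/stop */
                spinlock_t      sc_lock;        /* protects sc_flags */
                int             sc_flags;
        };

        static int scanner_stop(struct scanner *sc)
        {
                /* lock ordering: mutex first, then spinlock, released in
                 * the reverse order on every path */
                mutex_lock(&sc->sc_mutex);
                spin_lock(&sc->sc_lock);
                if (sc->sc_flags & SCAN_STOPPED) {
                        spin_unlock(&sc->sc_lock);
                        mutex_unlock(&sc->sc_mutex);
                        return -EALREADY;
                }
                sc->sc_flags = SCAN_STOPPING;
                spin_unlock(&sc->sc_lock);
                /* wake the thread and wait for SCAN_STOPPED here, outside
                 * the spinlock but still under the mutex */
                mutex_unlock(&sc->sc_mutex);
                return 0;
        }
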
@@ -301,8 +301,8 @@ int mdd_lfsck_setup(const struct lu_env *env, struct mdd_device *mdd)
        memset(lfsck, 0, sizeof(*lfsck));
        lfsck->ml_version = LFSCK_VERSION_V1;
        cfs_waitq_init(&lfsck->ml_thread.t_ctl_waitq);
-       cfs_mutex_init(&lfsck->ml_mutex);
-       cfs_spin_lock_init(&lfsck->ml_lock);
+       mutex_init(&lfsck->ml_mutex);
+       spin_lock_init(&lfsck->ml_lock);
 
        obj = dt_store_open(env, mdd->mdd_child, "", lfsck_bookmark_name,
                            &mdd_env_info(env)->mti_fid);
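Setup reduces to the standard kernel init calls; a minimal sketch, assuming a simplified structure:

        #include <linux/mutex.h>
        #include <linux/spinlock.h>

        struct lfsck_like {
                struct mutex    ll_mutex;
                spinlock_t      ll_lock;
        };

        static void lfsck_like_setup(struct lfsck_like *ll)
        {
                /* zeroing the structure is not a valid lock initialization;
                 * each lock needs its init call before first use */
                mutex_init(&ll->ll_mutex);
                spin_lock_init(&ll->ll_lock);
        }
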
index 1fdbd8b..c275eb0 100644
@@ -122,7 +122,7 @@ void mdd_pdo_read_unlock(const struct lu_env *env, struct mdd_object *obj,
 #else /* !MDD_DISABLE_PDO_LOCK */
 
 #ifdef CONFIG_LOCKDEP
-static cfs_lock_class_key_t mdd_pdirop_key;
+static struct lock_class_key mdd_pdirop_key;
 
 #define RETIP ((unsigned long)__builtin_return_address(0))
 
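The lockdep key above follows the usual kernel idiom: one static struct lock_class_key shared by all locks of a kind. A sketch under assumed names:

        #include <linux/lockdep.h>
        #include <linux/spinlock.h>

        #ifdef CONFIG_LOCKDEP
        static struct lock_class_key pdirop_key;
        #endif

        static void pdo_lock_init(spinlock_t *lock)
        {
                spin_lock_init(lock);
        #ifdef CONFIG_LOCKDEP
                /* group all per-directory locks into one lockdep class so
                 * the validator does not see each instance as distinct */
                lockdep_set_class(lock, &pdirop_key);
        #endif
        }
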
index 8ec3ae2..91d8a84 100644
@@ -238,9 +238,9 @@ static int lprocfs_rd_changelog_users(char *page, char **start, off_t off,
                return rc;
        }
 
-        cfs_spin_lock(&mdd->mdd_cl.mc_lock);
-        cur = mdd->mdd_cl.mc_index;
-        cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
+       spin_lock(&mdd->mdd_cl.mc_lock);
+       cur = mdd->mdd_cl.mc_index;
+       spin_unlock(&mdd->mdd_cl.mc_lock);
 
         cucb.count = count;
         cucb.page = page;
index e1b75af..a393b0f 100644
@@ -418,9 +418,9 @@ static int mdd_path_current(const struct lu_env *env,
         /* Verify that our path hasn't changed since we started the lookup.
            Record the current index, and verify the path resolves to the
            same fid. If it does, then the path is correct as of this index. */
-        cfs_spin_lock(&mdd->mdd_cl.mc_lock);
-        pli->pli_currec = mdd->mdd_cl.mc_index;
-        cfs_spin_unlock(&mdd->mdd_cl.mc_lock);
+       spin_lock(&mdd->mdd_cl.mc_lock);
+       pli->pli_currec = mdd->mdd_cl.mc_index;
+       spin_unlock(&mdd->mdd_cl.mc_lock);
         rc = mdd_path2fid(env, mdd, ptr, &pli->pli_fid);
         if (rc) {
                 CDEBUG(D_INFO, "mdd_path2fid(%s) failed %d\n", ptr, rc);
index 07678b6..6535e27 100644
@@ -256,17 +256,17 @@ static int mdt_ck_thread_main(void *args)
                 next = mdt->mdt_child;
                 rc = next->md_ops->mdo_update_capa_key(&env, next, tmp);
                 if (!rc) {
-                        cfs_spin_lock(&capa_lock);
-                        *bkey = *rkey;
-                        *rkey = *tmp;
-                        cfs_spin_unlock(&capa_lock);
-
-                        rc = write_capa_keys(&env, mdt, mdt->mdt_capa_keys);
-                        if (rc) {
-                                cfs_spin_lock(&capa_lock);
-                                *rkey = *bkey;
-                                memset(bkey, 0, sizeof(*bkey));
-                                cfs_spin_unlock(&capa_lock);
+                       spin_lock(&capa_lock);
+                       *bkey = *rkey;
+                       *rkey = *tmp;
+                       spin_unlock(&capa_lock);
+
+                       rc = write_capa_keys(&env, mdt, mdt->mdt_capa_keys);
+                       if (rc) {
+                               spin_lock(&capa_lock);
+                               *rkey = *bkey;
+                               memset(bkey, 0, sizeof(*bkey));
+                               spin_unlock(&capa_lock);
                         } else {
                                 set_capa_key_expiry(mdt);
                                 DEBUG_CAPA_KEY(D_SEC, rkey, "new");
index 847d8de..b42e5bf 100644
@@ -409,21 +409,21 @@ static int mdt_statfs(struct mdt_thread_info *info)
                rc = next->md_ops->mdo_statfs(info->mti_env, next, osfs);
                if (rc)
                        RETURN(rc);
-               cfs_spin_lock(&info->mti_mdt->mdt_osfs_lock);
+               spin_lock(&info->mti_mdt->mdt_osfs_lock);
                info->mti_mdt->mdt_osfs = *osfs;
                info->mti_mdt->mdt_osfs_age = cfs_time_current_64();
-               cfs_spin_unlock(&info->mti_mdt->mdt_osfs_lock);
+               spin_unlock(&info->mti_mdt->mdt_osfs_lock);
        } else {
                /** use cached statfs data */
-               cfs_spin_lock(&info->mti_mdt->mdt_osfs_lock);
+               spin_lock(&info->mti_mdt->mdt_osfs_lock);
                *osfs = info->mti_mdt->mdt_osfs;
-               cfs_spin_unlock(&info->mti_mdt->mdt_osfs_lock);
+               spin_unlock(&info->mti_mdt->mdt_osfs_lock);
        }
 
-        if (rc == 0)
+       if (rc == 0)
                mdt_counter_incr(req, LPROC_MDT_STATFS);
 
-        RETURN(rc);
+       RETURN(rc);
 }
 
 /**
@@ -1518,12 +1518,12 @@ static int mdt_set_info(struct mdt_thread_info *info)
                 req->rq_status = 0;
                 lustre_msg_set_status(req->rq_repmsg, 0);
 
-                cfs_spin_lock(&req->rq_export->exp_lock);
-                if (*(__u32 *)val)
-                        req->rq_export->exp_connect_flags |= OBD_CONNECT_RDONLY;
-                else
-                        req->rq_export->exp_connect_flags &=~OBD_CONNECT_RDONLY;
-                cfs_spin_unlock(&req->rq_export->exp_lock);
+               spin_lock(&req->rq_export->exp_lock);
+               if (*(__u32 *)val)
+                       req->rq_export->exp_connect_flags |= OBD_CONNECT_RDONLY;
+               else
+                       req->rq_export->exp_connect_flags &=~OBD_CONNECT_RDONLY;
+               spin_unlock(&req->rq_export->exp_lock);
 
         } else if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
                 struct changelog_setinfo *cs =
@@ -1575,9 +1575,9 @@ static int mdt_connect(struct mdt_thread_info *info)
         * the connect flags in the shared export data structure. LU-1623 */
        reply = req_capsule_server_get(info->mti_pill, &RMF_CONNECT_DATA);
        exp = req->rq_export;
-       cfs_spin_lock(&exp->exp_lock);
+       spin_lock(&exp->exp_lock);
        exp->exp_connect_flags = reply->ocd_connect_flags;
-       cfs_spin_unlock(&exp->exp_lock);
+       spin_unlock(&exp->exp_lock);
 
        rc = mdt_init_idmap(info);
        if (rc != 0)
@@ -4974,10 +4974,10 @@ static int mdt_adapt_sptlrpc_conf(struct obd_device *obd, int initial)
 
         sptlrpc_target_update_exp_flavor(obd, &tmp_rset);
 
-        cfs_write_lock(&m->mdt_sptlrpc_lock);
+       write_lock(&m->mdt_sptlrpc_lock);
         sptlrpc_rule_set_free(&m->mdt_sptlrpc_rset);
         m->mdt_sptlrpc_rset = tmp_rset;
-        cfs_write_unlock(&m->mdt_sptlrpc_lock);
+       write_unlock(&m->mdt_sptlrpc_lock);
 
         return 0;
 }
@@ -5038,10 +5038,10 @@ static int mdt_init0(const struct lu_env *env, struct mdt_device *m,
                obd->u.obt.obt_magic = OBT_MAGIC;
         }
 
-        cfs_rwlock_init(&m->mdt_sptlrpc_lock);
+       rwlock_init(&m->mdt_sptlrpc_lock);
         sptlrpc_rule_set_init(&m->mdt_sptlrpc_rset);
 
-        cfs_spin_lock_init(&m->mdt_ioepoch_lock);
+       spin_lock_init(&m->mdt_ioepoch_lock);
         m->mdt_opts.mo_compat_resname = 0;
         m->mdt_opts.mo_mds_capa = 1;
         m->mdt_opts.mo_oss_capa = 1;
@@ -5053,8 +5053,8 @@ static int mdt_init0(const struct lu_env *env, struct mdt_device *m,
         CFS_INIT_LIST_HEAD(&m->mdt_nosquash_nids);
         m->mdt_nosquash_str = NULL;
         m->mdt_nosquash_strlen = 0;
-        cfs_init_rwsem(&m->mdt_squash_sem);
-       cfs_spin_lock_init(&m->mdt_osfs_lock);
+       init_rwsem(&m->mdt_squash_sem);
+       spin_lock_init(&m->mdt_osfs_lock);
        m->mdt_osfs_age = cfs_time_shift_64(-1000);
 
         m->mdt_md_dev.md_lu_dev.ld_ops = &mdt_lu_ops;
@@ -5324,8 +5324,8 @@ static struct lu_object *mdt_object_alloc(const struct lu_env *env,
                 lu_object_init(o, h, d);
                 lu_object_add_top(h, o);
                 o->lo_ops = &mdt_obj_ops;
-                cfs_mutex_init(&mo->mot_ioepoch_mutex);
-                cfs_mutex_init(&mo->mot_lov_mutex);
+               mutex_init(&mo->mot_ioepoch_mutex);
+               mutex_init(&mo->mot_lov_mutex);
                 RETURN(o);
         } else
                 RETURN(NULL);
@@ -5414,13 +5414,13 @@ static int mdt_prepare(const struct lu_env *env,
        if (rc)
                RETURN(rc);
 
-       LASSERT(!cfs_test_bit(MDT_FL_CFGLOG, &mdt->mdt_state));
+       LASSERT(!test_bit(MDT_FL_CFGLOG, &mdt->mdt_state));
        target_recovery_init(&mdt->mdt_lut, mdt_recovery_handle);
-       cfs_set_bit(MDT_FL_CFGLOG, &mdt->mdt_state);
+       set_bit(MDT_FL_CFGLOG, &mdt->mdt_state);
        LASSERT(obd->obd_no_conn);
-       cfs_spin_lock(&obd->obd_dev_lock);
+       spin_lock(&obd->obd_dev_lock);
        obd->obd_no_conn = 0;
-       cfs_spin_unlock(&obd->obd_dev_lock);
+       spin_unlock(&obd->obd_dev_lock);
 
        if (obd->obd_recovering == 0)
                mdt_postrecov(env, mdt);
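The cfs_test_bit()/cfs_set_bit() calls become the kernel bitops, which operate on bit numbers within an unsigned long word. A minimal sketch with hypothetical flag names:

        #include <linux/bitops.h>

        #define DEV_FL_CFGLOG   0       /* bit numbers, not masks */
        #define DEV_FL_SYNCED   1

        static unsigned long dev_state;

        static void dev_mark_synced(void)
        {
                if (!test_bit(DEV_FL_SYNCED, &dev_state))
                        set_bit(DEV_FL_SYNCED, &dev_state);     /* atomic RMW */
        }
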
@@ -5529,9 +5529,9 @@ static int mdt_connect_internal(struct obd_export *exp,
         * connection, and it is safe to expose this flag before connection
         * processing completes. */
        if (data->ocd_connect_flags & OBD_CONNECT_LIGHTWEIGHT) {
-               cfs_spin_lock(&exp->exp_lock);
+               spin_lock(&exp->exp_lock);
                exp->exp_connect_flags |=  OBD_CONNECT_LIGHTWEIGHT;
-               cfs_spin_unlock(&exp->exp_lock);
+               spin_unlock(&exp->exp_lock);
        }
 
        data->ocd_version = LUSTRE_VERSION_CODE;
@@ -5554,21 +5554,21 @@ static int mdt_connect_internal(struct obd_export *exp,
 }
 
 static int mdt_connect_check_sptlrpc(struct mdt_device *mdt,
-                                     struct obd_export *exp,
-                                     struct ptlrpc_request *req)
+                                    struct obd_export *exp,
+                                    struct ptlrpc_request *req)
 {
-        struct sptlrpc_flavor   flvr;
-        int                     rc = 0;
+       struct sptlrpc_flavor   flvr;
+       int                     rc = 0;
 
-        if (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) {
-                cfs_read_lock(&mdt->mdt_sptlrpc_lock);
-                sptlrpc_target_choose_flavor(&mdt->mdt_sptlrpc_rset,
-                                             req->rq_sp_from,
-                                             req->rq_peer.nid,
-                                             &flvr);
-                cfs_read_unlock(&mdt->mdt_sptlrpc_lock);
+       if (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) {
+               read_lock(&mdt->mdt_sptlrpc_lock);
+               sptlrpc_target_choose_flavor(&mdt->mdt_sptlrpc_rset,
+                                            req->rq_sp_from,
+                                            req->rq_peer.nid,
+                                            &flvr);
+               read_unlock(&mdt->mdt_sptlrpc_lock);
 
-                cfs_spin_lock(&exp->exp_lock);
+               spin_lock(&exp->exp_lock);
 
                 exp->exp_sp_peer = req->rq_sp_from;
                 exp->exp_flvr = flvr;
@@ -5582,7 +5582,7 @@ static int mdt_connect_check_sptlrpc(struct mdt_device *mdt,
                         rc = -EACCES;
                 }
 
-                cfs_spin_unlock(&exp->exp_lock);
+               spin_unlock(&exp->exp_lock);
         } else {
                 if (exp->exp_sp_peer != req->rq_sp_from) {
                         CERROR("RPC source %s doesn't match %s\n",
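mdt_sptlrpc_lock is now a plain rwlock_t: flavor lookups take the shared side, configuration updates the exclusive side. A sketch of that reader/writer split, assuming a simplified rule set:

        #include <linux/spinlock.h>

        struct rule_set {
                rwlock_t        rs_lock;
                int             rs_gen;
        };

        static void rule_set_setup(struct rule_set *rs)
        {
                rwlock_init(&rs->rs_lock);
                rs->rs_gen = 0;
        }

        static int rule_set_gen(struct rule_set *rs)
        {
                int gen;

                read_lock(&rs->rs_lock);        /* many readers may hold this */
                gen = rs->rs_gen;
                read_unlock(&rs->rs_lock);
                return gen;
        }

        static void rule_set_bump(struct rule_set *rs)
        {
                write_lock(&rs->rs_lock);       /* exclusive vs. all readers */
                rs->rs_gen++;
                write_unlock(&rs->rs_lock);
        }
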
@@ -5625,11 +5625,11 @@ static int mdt_obd_connect(const struct lu_env *env,
         * XXX: probably not very appropriate method is used now
         *      at some point we should find a better one
         */
-       if (!cfs_test_bit(MDT_FL_SYNCED, &mdt->mdt_state)) {
+       if (!test_bit(MDT_FL_SYNCED, &mdt->mdt_state)) {
                rc = obd_health_check(env, mdt->mdt_child_exp->exp_obd);
                if (rc)
                        RETURN(-EAGAIN);
-               cfs_set_bit(MDT_FL_SYNCED, &mdt->mdt_state);
+               set_bit(MDT_FL_SYNCED, &mdt->mdt_state);
        }
 
         rc = class_connect(&conn, obd, cluuid);
@@ -5711,17 +5711,17 @@ static int mdt_export_cleanup(struct obd_export *exp)
         int rc = 0;
         ENTRY;
 
-        cfs_spin_lock(&med->med_open_lock);
-        while (!cfs_list_empty(&med->med_open_head)) {
-                cfs_list_t *tmp = med->med_open_head.next;
-                mfd = cfs_list_entry(tmp, struct mdt_file_data, mfd_list);
+       spin_lock(&med->med_open_lock);
+       while (!cfs_list_empty(&med->med_open_head)) {
+               cfs_list_t *tmp = med->med_open_head.next;
+               mfd = cfs_list_entry(tmp, struct mdt_file_data, mfd_list);
 
-                /* Remove mfd handle so it can't be found again.
-                 * We are consuming the mfd_list reference here. */
-                class_handle_unhash(&mfd->mfd_handle);
-                cfs_list_move_tail(&mfd->mfd_list, &closing_list);
-        }
-        cfs_spin_unlock(&med->med_open_lock);
+               /* Remove mfd handle so it can't be found again.
+                * We are consuming the mfd_list reference here. */
+               class_handle_unhash(&mfd->mfd_handle);
+               cfs_list_move_tail(&mfd->mfd_list, &closing_list);
+       }
+       spin_unlock(&med->med_open_lock);
         mdt = mdt_dev(obd->obd_lu_dev);
         LASSERT(mdt != NULL);
 
@@ -5786,12 +5786,12 @@ static int mdt_init_export(struct obd_export *exp)
         ENTRY;
 
         CFS_INIT_LIST_HEAD(&med->med_open_head);
-        cfs_spin_lock_init(&med->med_open_lock);
-        cfs_mutex_init(&med->med_idmap_mutex);
-        med->med_idmap = NULL;
-        cfs_spin_lock(&exp->exp_lock);
-        exp->exp_connecting = 1;
-        cfs_spin_unlock(&exp->exp_lock);
+       spin_lock_init(&med->med_open_lock);
+       mutex_init(&med->med_idmap_mutex);
+       med->med_idmap = NULL;
+       spin_lock(&exp->exp_lock);
+       exp->exp_connecting = 1;
+       spin_unlock(&exp->exp_lock);
 
         /* self-export doesn't need client data and ldlm initialization */
         if (unlikely(obd_uuid_equals(&exp->exp_obd->obd_uuid,
index 0e2a3e2..f5c8403 100644
@@ -114,7 +114,7 @@ static int mdt_identity_do_upcall(struct upcall_cache *cache,
         /* There is race condition:
          * "uc_upcall" was changed just after "is_identity_get_disabled" check.
          */
-        cfs_read_lock(&cache->uc_upcall_rwlock);
+       read_lock(&cache->uc_upcall_rwlock);
         CDEBUG(D_INFO, "The upcall is: '%s'\n", cache->uc_upcall);
 
         if (unlikely(!strcmp(cache->uc_upcall, "NONE"))) {
@@ -142,7 +142,7 @@ static int mdt_identity_do_upcall(struct upcall_cache *cache,
         }
         EXIT;
 out:
-        cfs_read_unlock(&cache->uc_upcall_rwlock);
+       read_unlock(&cache->uc_upcall_rwlock);
         return rc;
 }
 
index 1d81098..be8430b 100644
@@ -197,10 +197,10 @@ int mdt_init_idmap(struct mdt_thread_info *info)
         ENTRY;
 
         if (exp_connect_rmtclient(exp)) {
-                cfs_mutex_lock(&med->med_idmap_mutex);
+               mutex_lock(&med->med_idmap_mutex);
                 if (!med->med_idmap)
                         med->med_idmap = lustre_idmap_init();
-                cfs_mutex_unlock(&med->med_idmap_mutex);
+               mutex_unlock(&med->med_idmap_mutex);
 
                 if (IS_ERR(med->med_idmap)) {
                         long err = PTR_ERR(med->med_idmap);
@@ -227,12 +227,12 @@ int mdt_init_idmap(struct mdt_thread_info *info)
 
 void mdt_cleanup_idmap(struct mdt_export_data *med)
 {
-        cfs_mutex_lock(&med->med_idmap_mutex);
+       mutex_lock(&med->med_idmap_mutex);
         if (med->med_idmap != NULL) {
                 lustre_idmap_fini(med->med_idmap);
                 med->med_idmap = NULL;
         }
-        cfs_mutex_unlock(&med->med_idmap_mutex);
+       mutex_unlock(&med->med_idmap_mutex);
 }
 
 static inline void mdt_revoke_export_locks(struct obd_export *exp)
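med_idmap_mutex guards a check-then-allocate sequence; the same lazy-initialization idiom in isolation (hypothetical names, fixed-size allocation purely for illustration):

        #include <linux/errno.h>
        #include <linux/mutex.h>
        #include <linux/slab.h>

        struct export_data {
                struct mutex     ed_idmap_mutex;
                void            *ed_idmap;
        };

        static int export_idmap_get(struct export_data *ed)
        {
                /* the check and the assignment must be one critical section,
                 * or two racing threads could both allocate */
                mutex_lock(&ed->ed_idmap_mutex);
                if (ed->ed_idmap == NULL)
                        ed->ed_idmap = kzalloc(64, GFP_KERNEL);
                mutex_unlock(&ed->ed_idmap_mutex);
                return ed->ed_idmap != NULL ? 0 : -ENOMEM;
        }
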
index fd1cc0a..57d0d77 100644
@@ -133,7 +133,7 @@ struct mdt_device {
         /* mdt state flags */
         unsigned long              mdt_state;
         /* lock to protect IOepoch */
-        cfs_spinlock_t             mdt_ioepoch_lock;
+       spinlock_t                 mdt_ioepoch_lock;
         __u64                      mdt_ioepoch;
 
         /* transaction callbacks */
@@ -147,7 +147,7 @@ struct mdt_device {
         struct upcall_cache        *mdt_identity_cache;
 
         /* sptlrpc rules */
-        cfs_rwlock_t               mdt_sptlrpc_lock;
+       rwlock_t                   mdt_sptlrpc_lock;
         struct sptlrpc_rule_set    mdt_sptlrpc_rset;
 
         /* capability keys */
@@ -163,9 +163,9 @@ struct mdt_device {
                                    mdt_som_conf:1;
 
        /* statfs optimization: we cache a bit  */
-       struct obd_statfs          mdt_osfs;
-       __u64                      mdt_osfs_age;
-       cfs_spinlock_t             mdt_osfs_lock;
+       struct obd_statfs          mdt_osfs;
+       __u64                      mdt_osfs_age;
+       spinlock_t                 mdt_osfs_lock;
 
         /* root squash */
         uid_t                      mdt_squash_uid;
@@ -173,7 +173,7 @@ struct mdt_device {
         cfs_list_t                 mdt_nosquash_nids;
         char                      *mdt_nosquash_str;
         int                        mdt_nosquash_strlen;
-        cfs_rw_semaphore_t         mdt_squash_sem;
+       struct rw_semaphore        mdt_squash_sem;
 
         cfs_proc_dir_entry_t      *mdt_proc_entry;
         struct lprocfs_stats      *mdt_stats;
@@ -202,9 +202,9 @@ struct mdt_object {
         int                     mot_ioepoch_count;
         int                     mot_writecount;
         /* Lock to protect object's IO epoch. */
-        cfs_mutex_t             mot_ioepoch_mutex;
+       struct mutex            mot_ioepoch_mutex;
         /* Lock to protect create_data */
-        cfs_mutex_t             mot_lov_mutex;
+       struct mutex            mot_lov_mutex;
 };
 
 enum mdt_object_flags {
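The struct hunks above capture the whole type mapping; as a compact reference (hypothetical container struct):

        #include <linux/mutex.h>
        #include <linux/rwsem.h>
        #include <linux/spinlock.h>

        /* cfs_spinlock_t     -> spinlock_t
         * cfs_rwlock_t       -> rwlock_t
         * cfs_rw_semaphore_t -> struct rw_semaphore
         * cfs_mutex_t        -> struct mutex */
        struct converted_device {
                spinlock_t              cd_ioepoch_lock;
                rwlock_t                cd_sptlrpc_lock;
                struct rw_semaphore     cd_squash_sem;
                struct mutex            cd_lov_mutex;
        };
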
index 5ed23d6..f5736a1 100644
@@ -78,16 +78,16 @@ void mdt_exit_ucred(struct mdt_thread_info *info)
         }
 }
 
-static int match_nosquash_list(cfs_rw_semaphore_t *sem,
-                               cfs_list_t *nidlist,
-                               lnet_nid_t peernid)
+static int match_nosquash_list(struct rw_semaphore *sem,
+                              cfs_list_t *nidlist,
+                              lnet_nid_t peernid)
 {
-        int rc;
-        ENTRY;
-        cfs_down_read(sem);
-        rc = cfs_match_nid(peernid, nidlist);
-        cfs_up_read(sem);
-        RETURN(rc);
+       int rc;
+       ENTRY;
+       down_read(sem);
+       rc = cfs_match_nid(peernid, nidlist);
+       up_read(sem);
+       RETURN(rc);
 }
 
 /* root_squash for inter-MDS operations */
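match_nosquash_list() now takes the semaphore with down_read()/up_read(); unlike a spinlock, an rw_semaphore holder may sleep. A sketch of both sides under assumed names:

        #include <linux/rwsem.h>

        static DECLARE_RWSEM(nid_list_sem);
        static int nid_count;

        static int nid_list_size(void)
        {
                int n;

                down_read(&nid_list_sem);       /* shared; holder may sleep */
                n = nid_count;
                up_read(&nid_list_sem);
                return n;
        }

        static void nid_list_grow(void)
        {
                down_write(&nid_list_sem);      /* exclusive writer side */
                nid_count++;
                up_write(&nid_list_sem);
        }
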
index 26d4692..75b7aff 100644
@@ -169,15 +169,15 @@ LPROC_SEQ_FOPS(mdt_rename_stats);
 
 static int lproc_mdt_attach_rename_seqstat(struct mdt_device *mdt)
 {
-        struct lu_device *ld = &mdt->mdt_md_dev.md_lu_dev;
-        struct obd_device *obd = ld->ld_obd;
-        int i;
+       struct lu_device *ld = &mdt->mdt_md_dev.md_lu_dev;
+       struct obd_device *obd = ld->ld_obd;
+       int i;
 
-        for (i = 0; i < RENAME_LAST; i++)
-                spin_lock_init(&mdt->mdt_rename_stats.hist[i].oh_lock);
+       for (i = 0; i < RENAME_LAST; i++)
+               spin_lock_init(&mdt->mdt_rename_stats.hist[i].oh_lock);
 
-        return lprocfs_obd_seq_create(obd, "rename_stats", 0444,
-                                      &mdt_rename_stats_fops, mdt);
+       return lprocfs_obd_seq_create(obd, "rename_stats", 0444,
+                                     &mdt_rename_stats_fops, mdt);
 }
 
 void mdt_rename_counter_tally(struct mdt_thread_info *info,
@@ -376,11 +376,11 @@ static int lprocfs_rd_identity_upcall(char *page, char **start, off_t off,
         struct upcall_cache *hash = mdt->mdt_identity_cache;
         int len;
 
-        *eof = 1;
-        cfs_read_lock(&hash->uc_upcall_rwlock);
-        len = snprintf(page, count, "%s\n", hash->uc_upcall);
-        cfs_read_unlock(&hash->uc_upcall_rwlock);
-        return len;
+       *eof = 1;
+       read_lock(&hash->uc_upcall_rwlock);
+       len = snprintf(page, count, "%s\n", hash->uc_upcall);
+       read_unlock(&hash->uc_upcall_rwlock);
+       return len;
 }
 
 static int lprocfs_wr_identity_upcall(struct file *file, const char *buffer,
@@ -403,9 +403,9 @@ static int lprocfs_wr_identity_upcall(struct file *file, const char *buffer,
                 GOTO(failed, rc = -EFAULT);
 
         /* Remove any extraneous bits from the upcall (e.g. linefeeds) */
-        cfs_write_lock(&hash->uc_upcall_rwlock);
-        sscanf(kernbuf, "%s", hash->uc_upcall);
-        cfs_write_unlock(&hash->uc_upcall_rwlock);
+       write_lock(&hash->uc_upcall_rwlock);
+       sscanf(kernbuf, "%s", hash->uc_upcall);
+       write_unlock(&hash->uc_upcall_rwlock);
 
         if (strcmp(hash->uc_name, obd->obd_name) != 0)
                 CWARN("%s: write to upcall name %s\n",
@@ -832,7 +832,7 @@ static int lprocfs_wr_nosquash_nids(struct file *file, const char *buffer,
 
         if (!strcmp(kernbuf, "NONE") || !strcmp(kernbuf, "clear")) {
                 /* empty string is special case */
-                cfs_down_write(&mdt->mdt_squash_sem);
+               down_write(&mdt->mdt_squash_sem);
                 if (!cfs_list_empty(&mdt->mdt_nosquash_nids)) {
                         cfs_free_nidlist(&mdt->mdt_nosquash_nids);
                         OBD_FREE(mdt->mdt_nosquash_str,
@@ -840,7 +840,7 @@ static int lprocfs_wr_nosquash_nids(struct file *file, const char *buffer,
                         mdt->mdt_nosquash_str = NULL;
                         mdt->mdt_nosquash_strlen = 0;
                 }
-                cfs_up_write(&mdt->mdt_squash_sem);
+               up_write(&mdt->mdt_squash_sem);
                 LCONSOLE_INFO("%s: nosquash_nids is cleared\n",
                               obd->obd_name);
                 OBD_FREE(kernbuf, count + 1);
@@ -853,7 +853,7 @@ static int lprocfs_wr_nosquash_nids(struct file *file, const char *buffer,
                 GOTO(failed, rc = -EINVAL);
         }
 
-        cfs_down_write(&mdt->mdt_squash_sem);
+       down_write(&mdt->mdt_squash_sem);
         if (!cfs_list_empty(&mdt->mdt_nosquash_nids)) {
                 cfs_free_nidlist(&mdt->mdt_nosquash_nids);
                 OBD_FREE(mdt->mdt_nosquash_str, mdt->mdt_nosquash_strlen);
@@ -864,7 +864,7 @@ static int lprocfs_wr_nosquash_nids(struct file *file, const char *buffer,
 
         LCONSOLE_INFO("%s: nosquash_nids is set to %s\n",
                       obd->obd_name, kernbuf);
-        cfs_up_write(&mdt->mdt_squash_sem);
+       up_write(&mdt->mdt_squash_sem);
         RETURN(count);
 
  failed:
index 94b2531..7e96491 100644
@@ -118,7 +118,7 @@ static int mdt_create_data(struct mdt_thread_info *info,
 
         ma->ma_need = MA_INODE | MA_LOV;
         ma->ma_valid = 0;
-        cfs_mutex_lock(&o->mot_lov_mutex);
+       mutex_lock(&o->mot_lov_mutex);
         if (!(o->mot_flags & MOF_LOV_CREATED)) {
                 rc = mdo_create_data(info->mti_env,
                                      p ? mdt_object_child(p) : NULL,
@@ -129,7 +129,7 @@ static int mdt_create_data(struct mdt_thread_info *info,
                 if (rc == 0 && ma->ma_valid & MA_LOV)
                         o->mot_flags |= MOF_LOV_CREATED;
         }
-        cfs_mutex_unlock(&o->mot_lov_mutex);
+       mutex_unlock(&o->mot_lov_mutex);
         RETURN(rc);
 }
 
@@ -172,14 +172,14 @@ int mdt_ioepoch_open(struct mdt_thread_info *info, struct mdt_object *o,
             !S_ISREG(lu_object_attr(&o->mot_obj.mo_lu)))
                 RETURN(0);
 
-        cfs_mutex_lock(&o->mot_ioepoch_mutex);
-        if (mdt_ioepoch_opened(o)) {
-                /* Epoch continues even if there is no writers yet. */
-                CDEBUG(D_INODE, "continue epoch "LPU64" for "DFID"\n",
-                       o->mot_ioepoch, PFID(mdt_object_fid(o)));
-        } else {
-                /* XXX: ->mdt_ioepoch is not initialized at the mount */
-                cfs_spin_lock(&mdt->mdt_ioepoch_lock);
+       mutex_lock(&o->mot_ioepoch_mutex);
+       if (mdt_ioepoch_opened(o)) {
+               /* Epoch continues even if there is no writers yet. */
+               CDEBUG(D_INODE, "continue epoch "LPU64" for "DFID"\n",
+                      o->mot_ioepoch, PFID(mdt_object_fid(o)));
+       } else {
+               /* XXX: ->mdt_ioepoch is not initialized at the mount */
+               spin_lock(&mdt->mdt_ioepoch_lock);
                 if (mdt->mdt_ioepoch < info->mti_replayepoch)
                         mdt->mdt_ioepoch = info->mti_replayepoch;
 
@@ -190,16 +190,16 @@ int mdt_ioepoch_open(struct mdt_thread_info *info, struct mdt_object *o,
                 else
                         o->mot_ioepoch = mdt->mdt_ioepoch;
 
-                cfs_spin_unlock(&mdt->mdt_ioepoch_lock);
+               spin_unlock(&mdt->mdt_ioepoch_lock);
 
-                CDEBUG(D_INODE, "starting epoch "LPU64" for "DFID"\n",
-                       o->mot_ioepoch, PFID(mdt_object_fid(o)));
-                if (created)
-                        o->mot_flags |= MOF_SOM_CREATED;
-                cancel = 1;
-        }
-        o->mot_ioepoch_count++;
-        cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+               CDEBUG(D_INODE, "starting epoch "LPU64" for "DFID"\n",
+                      o->mot_ioepoch, PFID(mdt_object_fid(o)));
+               if (created)
+                       o->mot_flags |= MOF_SOM_CREATED;
+               cancel = 1;
+       }
+       o->mot_ioepoch_count++;
+       mutex_unlock(&o->mot_ioepoch_mutex);
 
         /* Cancel Size-on-MDS attributes cached on clients for the open case.
          * In the truncate case, see mdt_reint_setattr(). */
@@ -274,7 +274,7 @@ static inline int mdt_ioepoch_close_on_eviction(struct mdt_thread_info *info,
 {
         int rc = 0;
 
-        cfs_mutex_lock(&o->mot_ioepoch_mutex);
+       mutex_lock(&o->mot_ioepoch_mutex);
         CDEBUG(D_INODE, "Eviction. Closing IOepoch "LPU64" on "DFID". "
                "Count %d\n", o->mot_ioepoch, PFID(mdt_object_fid(o)),
                o->mot_ioepoch_count);
@@ -287,7 +287,7 @@ static inline int mdt_ioepoch_close_on_eviction(struct mdt_thread_info *info,
                 rc = mdt_som_attr_set(info, o, o->mot_ioepoch, MDT_SOM_DISABLE);
                 mdt_object_som_enable(o, o->mot_ioepoch);
         }
-        cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+       mutex_unlock(&o->mot_ioepoch_mutex);
         RETURN(rc);
 }
 
@@ -302,7 +302,7 @@ static inline int mdt_ioepoch_close_on_replay(struct mdt_thread_info *info,
         int rc = MDT_IOEPOCH_CLOSED;
         ENTRY;
 
-        cfs_mutex_lock(&o->mot_ioepoch_mutex);
+       mutex_lock(&o->mot_ioepoch_mutex);
         CDEBUG(D_INODE, "Replay. Closing epoch "LPU64" on "DFID". Count %d\n",
                o->mot_ioepoch, PFID(mdt_object_fid(o)), o->mot_ioepoch_count);
         o->mot_ioepoch_count--;
@@ -314,7 +314,7 @@ static inline int mdt_ioepoch_close_on_replay(struct mdt_thread_info *info,
 
         if (!mdt_ioepoch_opened(o))
                 mdt_object_som_enable(o, info->mti_ioepoch->ioepoch);
-        cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+       mutex_unlock(&o->mot_ioepoch_mutex);
 
         RETURN(rc);
 }
@@ -343,7 +343,7 @@ static inline int mdt_ioepoch_close_reg(struct mdt_thread_info *info,
         la = &info->mti_attr.ma_attr;
         achange = (info->mti_ioepoch->flags & MF_SOM_CHANGE);
 
-        cfs_mutex_lock(&o->mot_ioepoch_mutex);
+       mutex_lock(&o->mot_ioepoch_mutex);
         o->mot_ioepoch_count--;
 
         tmp_ma = &info->mti_u.som.attr;
@@ -410,7 +410,7 @@ static inline int mdt_ioepoch_close_reg(struct mdt_thread_info *info,
                 mdt_object_som_enable(o, o->mot_ioepoch);
         }
 
-        cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+       mutex_unlock(&o->mot_ioepoch_mutex);
         /* If recovery is needed, tell the client to perform GETATTR under
          * the lock. */
         if (ret == MDT_IOEPOCH_GETATTR && recovery) {
@@ -422,7 +422,7 @@ static inline int mdt_ioepoch_close_reg(struct mdt_thread_info *info,
         RETURN(rc ? : ret);
 
 error_up:
-        cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+       mutex_unlock(&o->mot_ioepoch_mutex);
         return rc;
 }
 
@@ -491,7 +491,7 @@ int mdt_som_au_close(struct mdt_thread_info *info, struct mdt_object *o)
              !(info->mti_attr.ma_attr.la_valid & LA_SIZE)))
                 act = MDT_SOM_DISABLE;
 
-        cfs_mutex_lock(&o->mot_ioepoch_mutex);
+       mutex_lock(&o->mot_ioepoch_mutex);
         /* Mark the object it is the recovery state if we failed to obtain
          * SOM attributes. */
         if (act == MDT_SOM_DISABLE)
@@ -505,7 +505,7 @@ int mdt_som_au_close(struct mdt_thread_info *info, struct mdt_object *o)
                         rc = mdt_som_attr_set(info, o, ioepoch, act);
                 mdt_object_som_enable(o, ioepoch);
         }
-        cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+       mutex_unlock(&o->mot_ioepoch_mutex);
         RETURN(rc);
 }
 
@@ -513,9 +513,9 @@ int mdt_write_read(struct mdt_object *o)
 {
         int rc = 0;
         ENTRY;
-        cfs_mutex_lock(&o->mot_ioepoch_mutex);
+       mutex_lock(&o->mot_ioepoch_mutex);
         rc = o->mot_writecount;
-        cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+       mutex_unlock(&o->mot_ioepoch_mutex);
         RETURN(rc);
 }
 
@@ -523,21 +523,21 @@ int mdt_write_get(struct mdt_object *o)
 {
         int rc = 0;
         ENTRY;
-        cfs_mutex_lock(&o->mot_ioepoch_mutex);
+       mutex_lock(&o->mot_ioepoch_mutex);
         if (o->mot_writecount < 0)
                 rc = -ETXTBSY;
         else
                 o->mot_writecount++;
-        cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+       mutex_unlock(&o->mot_ioepoch_mutex);
         RETURN(rc);
 }
 
 void mdt_write_put(struct mdt_object *o)
 {
         ENTRY;
-        cfs_mutex_lock(&o->mot_ioepoch_mutex);
+       mutex_lock(&o->mot_ioepoch_mutex);
         o->mot_writecount--;
-        cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+       mutex_unlock(&o->mot_ioepoch_mutex);
         EXIT;
 }
 
@@ -545,21 +545,21 @@ static int mdt_write_deny(struct mdt_object *o)
 {
         int rc = 0;
         ENTRY;
-        cfs_mutex_lock(&o->mot_ioepoch_mutex);
+       mutex_lock(&o->mot_ioepoch_mutex);
         if (o->mot_writecount > 0)
                 rc = -ETXTBSY;
         else
                 o->mot_writecount--;
-        cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+       mutex_unlock(&o->mot_ioepoch_mutex);
         RETURN(rc);
 }
 
 static void mdt_write_allow(struct mdt_object *o)
 {
         ENTRY;
-        cfs_mutex_lock(&o->mot_ioepoch_mutex);
+       mutex_lock(&o->mot_ioepoch_mutex);
         o->mot_writecount++;
-        cfs_mutex_unlock(&o->mot_ioepoch_mutex);
+       mutex_unlock(&o->mot_ioepoch_mutex);
         EXIT;
 }
 
@@ -576,7 +576,7 @@ static void mdt_empty_transno(struct mdt_thread_info *info, int rc)
         if (lustre_msg_get_transno(req->rq_repmsg) != 0)
                 RETURN_EXIT;
 
-        cfs_spin_lock(&mdt->mdt_lut.lut_translock);
+       spin_lock(&mdt->mdt_lut.lut_translock);
        if (rc != 0) {
                if (info->mti_transno != 0) {
                        struct obd_export *exp = req->rq_export;
@@ -589,13 +589,13 @@ static void mdt_empty_transno(struct mdt_thread_info *info, int rc)
                        RETURN_EXIT;
                }
        } else if (info->mti_transno == 0) {
-                info->mti_transno = ++ mdt->mdt_lut.lut_last_transno;
-        } else {
-                /* should be replay */
-                if (info->mti_transno > mdt->mdt_lut.lut_last_transno)
-                        mdt->mdt_lut.lut_last_transno = info->mti_transno;
+               info->mti_transno = ++mdt->mdt_lut.lut_last_transno;
+       } else {
+               /* should be replay */
+               if (info->mti_transno > mdt->mdt_lut.lut_last_transno)
+                       mdt->mdt_lut.lut_last_transno = info->mti_transno;
        }
-        cfs_spin_unlock(&mdt->mdt_lut.lut_translock);
+       spin_unlock(&mdt->mdt_lut.lut_translock);
 
         CDEBUG(D_INODE, "transno = "LPU64", last_committed = "LPU64"\n",
                         info->mti_transno,
@@ -607,7 +607,7 @@ static void mdt_empty_transno(struct mdt_thread_info *info, int rc)
         /* update lcd in memory only for resent cases */
         ted = &req->rq_export->exp_target_data;
         LASSERT(ted);
-        cfs_mutex_lock(&ted->ted_lcd_lock);
+       mutex_lock(&ted->ted_lcd_lock);
         lcd = ted->ted_lcd;
        if (info->mti_transno < lcd->lcd_last_transno &&
            info->mti_transno != 0) {
@@ -615,7 +615,7 @@ static void mdt_empty_transno(struct mdt_thread_info *info, int rc)
                 * last rcvd info if replay req transno < last transno,
                 * otherwise the following resend(after replay) can not
                 * be checked correctly by xid */
-               cfs_mutex_unlock(&ted->ted_lcd_lock);
+               mutex_unlock(&ted->ted_lcd_lock);
                CDEBUG(D_HA, "%s: transno = "LPU64" < last_transno = "LPU64"\n",
                        mdt->mdt_md_dev.md_lu_dev.ld_obd->obd_name,
                        info->mti_transno, lcd->lcd_last_transno);
@@ -644,7 +644,7 @@ static void mdt_empty_transno(struct mdt_thread_info *info, int rc)
                 lcd->lcd_last_result = rc;
                 lcd->lcd_last_data = info->mti_opdata;
         }
-        cfs_mutex_unlock(&ted->ted_lcd_lock);
+       mutex_unlock(&ted->ted_lcd_lock);
 
         EXIT;
 }
@@ -754,10 +754,10 @@ static int mdt_mfd_open(struct mdt_thread_info *info, struct mdt_object *p,
                                        "cookie=" LPX64"\n", mfd,
                                        PFID(mdt_object_fid(mfd->mfd_object)),
                                        info->mti_rr.rr_handle->cookie);
-                                cfs_spin_lock(&med->med_open_lock);
-                                class_handle_unhash(&old_mfd->mfd_handle);
-                                cfs_list_del_init(&old_mfd->mfd_list);
-                                cfs_spin_unlock(&med->med_open_lock);
+                               spin_lock(&med->med_open_lock);
+                               class_handle_unhash(&old_mfd->mfd_handle);
+                               cfs_list_del_init(&old_mfd->mfd_list);
+                               spin_unlock(&med->med_open_lock);
                                 /* no attr update for that close */
                                 la->la_valid = 0;
                                 ma->ma_valid |= MA_FLAGS;
@@ -774,15 +774,15 @@ static int mdt_mfd_open(struct mdt_thread_info *info, struct mdt_object *p,
                 repbody->handle.cookie = mfd->mfd_handle.h_cookie;
 
                 if (req->rq_export->exp_disconnected) {
-                        cfs_spin_lock(&med->med_open_lock);
-                        class_handle_unhash(&mfd->mfd_handle);
-                        cfs_list_del_init(&mfd->mfd_list);
-                        cfs_spin_unlock(&med->med_open_lock);
-                        mdt_mfd_close(info, mfd);
-                } else {
-                        cfs_spin_lock(&med->med_open_lock);
-                        cfs_list_add(&mfd->mfd_list, &med->med_open_head);
-                        cfs_spin_unlock(&med->med_open_lock);
+                       spin_lock(&med->med_open_lock);
+                       class_handle_unhash(&mfd->mfd_handle);
+                       cfs_list_del_init(&mfd->mfd_list);
+                       spin_unlock(&med->med_open_lock);
+                       mdt_mfd_close(info, mfd);
+               } else {
+                       spin_lock(&med->med_open_lock);
+                       cfs_list_add(&mfd->mfd_list, &med->med_open_head);
+                       spin_unlock(&med->med_open_lock);
                 }
 
                 mdt_empty_transno(info, rc);
@@ -942,15 +942,14 @@ int mdt_finish_open(struct mdt_thread_info *info,
 
         mfd = NULL;
         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
-                cfs_spin_lock(&med->med_open_lock);
-                cfs_list_for_each(t, &med->med_open_head) {
-                        mfd = cfs_list_entry(t, struct mdt_file_data, mfd_list);
-                        if (mfd->mfd_xid == req->rq_xid) {
-                                break;
-                        }
-                        mfd = NULL;
-                }
-                cfs_spin_unlock(&med->med_open_lock);
+               spin_lock(&med->med_open_lock);
+               cfs_list_for_each(t, &med->med_open_head) {
+                       mfd = cfs_list_entry(t, struct mdt_file_data, mfd_list);
+                       if (mfd->mfd_xid == req->rq_xid)
+                               break;
+                       mfd = NULL;
+               }
+               spin_unlock(&med->med_open_lock);
 
                 if (mfd != NULL) {
                         repbody->handle.cookie = mfd->mfd_handle.h_cookie;
@@ -1628,10 +1627,10 @@ int mdt_mfd_close(struct mdt_thread_info *info, struct mdt_file_data *mfd)
 
                 LASSERT(mdt_info_req(info));
                 med = &mdt_info_req(info)->rq_export->exp_mdt_data;
-                cfs_spin_lock(&med->med_open_lock);
-                cfs_list_add(&mfd->mfd_list, &med->med_open_head);
-                class_handle_hash_back(&mfd->mfd_handle);
-                cfs_spin_unlock(&med->med_open_lock);
+               spin_lock(&med->med_open_lock);
+               cfs_list_add(&mfd->mfd_list, &med->med_open_head);
+               class_handle_hash_back(&mfd->mfd_handle);
+               spin_unlock(&med->med_open_lock);
 
                 if (ret == MDT_IOEPOCH_OPENED) {
                         ret = 0;
@@ -1701,19 +1700,19 @@ int mdt_close(struct mdt_thread_info *info)
         }
 
         med = &req->rq_export->exp_mdt_data;
-        cfs_spin_lock(&med->med_open_lock);
-        mfd = mdt_handle2mfd(info, &info->mti_ioepoch->handle);
-        if (mdt_mfd_closed(mfd)) {
-                cfs_spin_unlock(&med->med_open_lock);
-                CDEBUG(D_INODE, "no handle for file close: fid = "DFID
-                       ": cookie = "LPX64"\n", PFID(info->mti_rr.rr_fid1),
-                       info->mti_ioepoch->handle.cookie);
-                /** not serious error since bug 3633 */
-                rc = -ESTALE;
-        } else {
-                class_handle_unhash(&mfd->mfd_handle);
-                cfs_list_del_init(&mfd->mfd_list);
-                cfs_spin_unlock(&med->med_open_lock);
+       spin_lock(&med->med_open_lock);
+       mfd = mdt_handle2mfd(info, &info->mti_ioepoch->handle);
+       if (mdt_mfd_closed(mfd)) {
+               spin_unlock(&med->med_open_lock);
+               CDEBUG(D_INODE, "no handle for file close: fid = "DFID
+                      ": cookie = "LPX64"\n", PFID(info->mti_rr.rr_fid1),
+                      info->mti_ioepoch->handle.cookie);
+               /** not serious error since bug 3633 */
+               rc = -ESTALE;
+       } else {
+               class_handle_unhash(&mfd->mfd_handle);
+               cfs_list_del_init(&mfd->mfd_list);
+               spin_unlock(&med->med_open_lock);
 
                 /* Do not lose object before last unlink. */
                 o = mfd->mfd_object;
@@ -1773,10 +1772,10 @@ int mdt_done_writing(struct mdt_thread_info *info)
                 RETURN(lustre_msg_get_status(req->rq_repmsg));
 
         med = &info->mti_exp->exp_mdt_data;
-        cfs_spin_lock(&med->med_open_lock);
-        mfd = mdt_handle2mfd(info, &info->mti_ioepoch->handle);
-        if (mfd == NULL) {
-                cfs_spin_unlock(&med->med_open_lock);
+       spin_lock(&med->med_open_lock);
+       mfd = mdt_handle2mfd(info, &info->mti_ioepoch->handle);
+       if (mfd == NULL) {
+               spin_unlock(&med->med_open_lock);
                 CDEBUG(D_INODE, "no handle for done write: fid = "DFID
                        ": cookie = "LPX64" ioepoch = "LPU64"\n",
                        PFID(info->mti_rr.rr_fid1),
@@ -1796,7 +1795,7 @@ int mdt_done_writing(struct mdt_thread_info *info)
                 mfd->mfd_mode == MDS_FMODE_TRUNC);
         class_handle_unhash(&mfd->mfd_handle);
         cfs_list_del_init(&mfd->mfd_list);
-        cfs_spin_unlock(&med->med_open_lock);
+       spin_unlock(&med->med_open_lock);
 
         /* Set EPOCH CLOSE flag if not set by client. */
         info->mti_ioepoch->flags |= MF_EPOCH_CLOSE;
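Throughout mdt_open.c, med_open_lock is the spinlock protecting the med_open_head list. The underlying idiom, sketched with hypothetical types (the Lustre code goes through its cfs_list_* wrappers; the bare kernel list API is shown here):

        #include <linux/list.h>
        #include <linux/spinlock.h>

        struct open_table {
                spinlock_t              ot_lock;        /* protects ot_head */
                struct list_head        ot_head;
        };

        struct open_file {
                struct list_head        of_list;
        };

        static void open_file_add(struct open_table *ot, struct open_file *of)
        {
                spin_lock(&ot->ot_lock);
                list_add(&of->of_list, &ot->ot_head);
                spin_unlock(&ot->ot_lock);
        }

        static void open_file_del(struct open_table *ot, struct open_file *of)
        {
                spin_lock(&ot->ot_lock);
                list_del_init(&of->of_list);    /* safe to test/delete again */
                spin_unlock(&ot->ot_lock);
        }
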
index 909bd10..1720cd4 100644
@@ -151,20 +151,20 @@ static int mdt_clients_data_init(const struct lu_env *env,
                 LASSERTF(rc == 0, "rc = %d\n", rc);
                 /* VBR: set export last committed version */
                 exp->exp_last_committed = last_transno;
-                cfs_spin_lock(&exp->exp_lock);
-                exp->exp_connecting = 0;
-                exp->exp_in_recovery = 0;
-                cfs_spin_unlock(&exp->exp_lock);
-                obd->obd_max_recoverable_clients++;
-                class_export_put(exp);
-
-                CDEBUG(D_OTHER, "client at idx %d has last_transno="LPU64"\n",
-                       cl_idx, last_transno);
-                /* protect __u64 value update */
-                cfs_spin_lock(&mdt->mdt_lut.lut_translock);
-                mdt->mdt_lut.lut_last_transno = max(last_transno,
-                                                mdt->mdt_lut.lut_last_transno);
-                cfs_spin_unlock(&mdt->mdt_lut.lut_translock);
+               spin_lock(&exp->exp_lock);
+               exp->exp_connecting = 0;
+               exp->exp_in_recovery = 0;
+               spin_unlock(&exp->exp_lock);
+               obd->obd_max_recoverable_clients++;
+               class_export_put(exp);
+
+               CDEBUG(D_OTHER, "client at idx %d has last_transno ="LPU64"\n",
+                      cl_idx, last_transno);
+               /* protect __u64 value update */
+               spin_lock(&mdt->mdt_lut.lut_translock);
+               mdt->mdt_lut.lut_last_transno = max(last_transno,
+                                               mdt->mdt_lut.lut_last_transno);
+               spin_unlock(&mdt->mdt_lut.lut_translock);
         }
 
 err_client:
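As the comment in the hunk above says, the spinlock exists to protect a __u64 update; a minimal sketch of why:

        #include <linux/spinlock.h>
        #include <linux/types.h>

        static DEFINE_SPINLOCK(translock);
        static __u64 last_transno;

        static void update_last_transno(__u64 transno)
        {
                /* the compare and the 64-bit store must be one unit, and a
                 * bare 64-bit store is not atomic on 32-bit architectures */
                spin_lock(&translock);
                if (transno > last_transno)
                        last_transno = transno;
                spin_unlock(&translock);
        }
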
@@ -270,9 +270,9 @@ static int mdt_server_data_init(const struct lu_env *env,
 
         lsd->lsd_feature_incompat |= OBD_INCOMPAT_FID;
 
-        cfs_spin_lock(&mdt->mdt_lut.lut_translock);
-        mdt->mdt_lut.lut_last_transno = lsd->lsd_last_transno;
-        cfs_spin_unlock(&mdt->mdt_lut.lut_translock);
+       spin_lock(&mdt->mdt_lut.lut_translock);
+       mdt->mdt_lut.lut_last_transno = lsd->lsd_last_transno;
+       spin_unlock(&mdt->mdt_lut.lut_translock);
 
         CDEBUG(D_INODE, "========BEGIN DUMPING LAST_RCVD========\n");
         CDEBUG(D_INODE, "%s: server last_transno: "LPU64"\n",
@@ -303,11 +303,11 @@ static int mdt_server_data_init(const struct lu_env *env,
         if (rc)
                 GOTO(err_client, rc);
 
-        cfs_spin_lock(&mdt->mdt_lut.lut_translock);
-        /* obd_last_committed is used for compatibility
-         * with other lustre recovery code */
-        obd->obd_last_committed = mdt->mdt_lut.lut_last_transno;
-        cfs_spin_unlock(&mdt->mdt_lut.lut_translock);
+       spin_lock(&mdt->mdt_lut.lut_translock);
+       /* obd_last_committed is used for compatibility
+        * with other lustre recovery code */
+       obd->obd_last_committed = mdt->mdt_lut.lut_last_transno;
+       spin_unlock(&mdt->mdt_lut.lut_translock);
 
         obd->u.obt.obt_mount_count = mount_count + 1;
         obd->u.obt.obt_instance = (__u32)obd->u.obt.obt_mount_count;
@@ -347,12 +347,12 @@ static int mdt_last_rcvd_update(struct mdt_thread_info *mti,
         ted = &req->rq_export->exp_target_data;
         LASSERT(ted);
 
-        cfs_mutex_lock(&ted->ted_lcd_lock);
-        lcd = ted->ted_lcd;
-        /* if the export has already been disconnected, we have no last_rcvd slot,
-         * update server data with latest transno then */
-        if (lcd == NULL) {
-                cfs_mutex_unlock(&ted->ted_lcd_lock);
+       mutex_lock(&ted->ted_lcd_lock);
+       lcd = ted->ted_lcd;
+       /* if the export has already been disconnected, we have no last_rcvd
+        * slot, update server data with latest transno then */
+       if (lcd == NULL) {
+               mutex_unlock(&ted->ted_lcd_lock);
                 CWARN("commit transaction for disconnected client %s: rc %d\n",
                       req->rq_export->exp_client_uuid.uuid, rc);
                err = tgt_server_data_write(mti->mti_env, &mdt->mdt_lut, th);
@@ -371,11 +371,11 @@ static int mdt_last_rcvd_update(struct mdt_thread_info *mti,
                                        lcd->lcd_last_close_transno,
                                        mti->mti_transno, req_is_replay(req));
                                 if (req_is_replay(req)) {
-                                        cfs_spin_lock(&req->rq_export->exp_lock);
-                                        req->rq_export->exp_vbr_failed = 1;
-                                        cfs_spin_unlock(&req->rq_export->exp_lock);
-                                }
-                                cfs_mutex_unlock(&ted->ted_lcd_lock);
+                                       spin_lock(&req->rq_export->exp_lock);
+                                       req->rq_export->exp_vbr_failed = 1;
+                                       spin_unlock(&req->rq_export->exp_lock);
+                               }
+                               mutex_unlock(&ted->ted_lcd_lock);
                                 RETURN(req_is_replay(req) ? -EOVERFLOW : 0);
                         }
                         lcd->lcd_last_close_transno = mti->mti_transno;
@@ -399,11 +399,11 @@ static int mdt_last_rcvd_update(struct mdt_thread_info *mti,
                                        lcd->lcd_last_transno,
                                        mti->mti_transno, req_is_replay(req));
                                 if (req_is_replay(req)) {
-                                        cfs_spin_lock(&req->rq_export->exp_lock);
-                                        req->rq_export->exp_vbr_failed = 1;
-                                        cfs_spin_unlock(&req->rq_export->exp_lock);
-                                }
-                                cfs_mutex_unlock(&ted->ted_lcd_lock);
+                                       spin_lock(&req->rq_export->exp_lock);
+                                       req->rq_export->exp_vbr_failed = 1;
+                                       spin_unlock(&req->rq_export->exp_lock);
+                               }
+                               mutex_unlock(&ted->ted_lcd_lock);
                                 RETURN(req_is_replay(req) ? -EOVERFLOW : 0);
                         }
                         lcd->lcd_last_transno = mti->mti_transno;
@@ -424,31 +424,31 @@ static int mdt_last_rcvd_update(struct mdt_thread_info *mti,
                struct lu_target        *tg = &mdt->mdt_lut;
                bool                     update = false;
 
-               cfs_mutex_unlock(&ted->ted_lcd_lock);
+               mutex_unlock(&ted->ted_lcd_lock);
                err = 0;
 
                /* All operations performed by LW clients are synchronous and
                 * we store the committed transno in the last_rcvd header */
-               cfs_spin_lock(&tg->lut_translock);
+               spin_lock(&tg->lut_translock);
                if (mti->mti_transno > tg->lut_lsd.lsd_last_transno) {
                        tg->lut_lsd.lsd_last_transno = mti->mti_transno;
                        update = true;
                }
-               cfs_spin_unlock(&tg->lut_translock);
+               spin_unlock(&tg->lut_translock);
 
                if (update)
                        err = tgt_server_data_write(mti->mti_env, tg, th);
        } else if (off <= 0) {
                CERROR("%s: client idx %d has offset %lld\n",
                       mdt2obd_dev(mdt)->obd_name, ted->ted_lr_idx, off);
-               cfs_mutex_unlock(&ted->ted_lcd_lock);
+               mutex_unlock(&ted->ted_lcd_lock);
                err = -EINVAL;
        } else {
                err = tgt_client_data_write(mti->mti_env, &mdt->mdt_lut, lcd,
                                            &off, th);
-               cfs_mutex_unlock(&ted->ted_lcd_lock);
-        }
-        RETURN(err);
+               mutex_unlock(&ted->ted_lcd_lock);
+       }
+       RETURN(err);
 }
 
 extern struct lu_context_key mdt_thread_key;
@@ -509,7 +509,7 @@ static int mdt_txn_stop_cb(const struct lu_env *env,
         }
 
         mti->mti_has_trans = 1;
-        cfs_spin_lock(&mdt->mdt_lut.lut_translock);
+       spin_lock(&mdt->mdt_lut.lut_translock);
         if (txn->th_result != 0) {
                 if (mti->mti_transno != 0) {
                        CERROR("Replay transno "LPU64" failed: rc %d\n",
@@ -523,7 +523,7 @@ static int mdt_txn_stop_cb(const struct lu_env *env,
                 if (mti->mti_transno > mdt->mdt_lut.lut_last_transno)
                         mdt->mdt_lut.lut_last_transno = mti->mti_transno;
         }
-        cfs_spin_unlock(&mdt->mdt_lut.lut_translock);
+       spin_unlock(&mdt->mdt_lut.lut_translock);
         /* sometimes the reply message has not been successfully packed */
         LASSERT(req != NULL && req->rq_repmsg != NULL);
 
@@ -594,7 +594,7 @@ static void mdt_steal_ack_locks(struct ptlrpc_request *req)
         int                        i;
 
         /* CAVEAT EMPTOR: spinlock order */
-        cfs_spin_lock(&exp->exp_lock);
+       spin_lock(&exp->exp_lock);
         cfs_list_for_each (tmp, &exp->exp_outstanding_replies) {
                 oldrep = cfs_list_entry(tmp, struct ptlrpc_reply_state,
                                         rs_exp_list);
@@ -609,7 +609,7 @@ static void mdt_steal_ack_locks(struct ptlrpc_request *req)
                                 oldrep->rs_opc);
 
                svcpt = oldrep->rs_svcpt;
-               cfs_spin_lock(&svcpt->scp_rep_lock);
+               spin_lock(&svcpt->scp_rep_lock);
 
                 cfs_list_del_init (&oldrep->rs_exp_list);
 
@@ -625,14 +625,14 @@ static void mdt_steal_ack_locks(struct ptlrpc_request *req)
                 oldrep->rs_nlocks = 0;
 
                 DEBUG_REQ(D_HA, req, "stole locks for");
-                cfs_spin_lock(&oldrep->rs_lock);
-                ptlrpc_schedule_difficult_reply (oldrep);
-                cfs_spin_unlock(&oldrep->rs_lock);
-
-               cfs_spin_unlock(&svcpt->scp_rep_lock);
-                break;
-        }
-        cfs_spin_unlock(&exp->exp_lock);
+               spin_lock(&oldrep->rs_lock);
+               ptlrpc_schedule_difficult_reply(oldrep);
+               spin_unlock(&oldrep->rs_lock);
+
+               spin_unlock(&svcpt->scp_rep_lock);
+               break;
+       }
+       spin_unlock(&exp->exp_lock);
 }
 
 /**
@@ -764,17 +764,17 @@ static void mdt_reconstruct_setattr(struct mdt_thread_info *mti,
 
                 repbody = req_capsule_server_get(mti->mti_pill, &RMF_MDT_BODY);
                 repbody->ioepoch = obj->mot_ioepoch;
-                cfs_spin_lock(&med->med_open_lock);
-                cfs_list_for_each_entry(mfd, &med->med_open_head, mfd_list) {
-                        if (mfd->mfd_xid == req->rq_xid)
-                                break;
-                }
-                LASSERT(&mfd->mfd_list != &med->med_open_head);
-                cfs_spin_unlock(&med->med_open_lock);
-                repbody->handle.cookie = mfd->mfd_handle.h_cookie;
-        }
+               spin_lock(&med->med_open_lock);
+               cfs_list_for_each_entry(mfd, &med->med_open_head, mfd_list) {
+                       if (mfd->mfd_xid == req->rq_xid)
+                               break;
+               }
+               LASSERT(&mfd->mfd_list != &med->med_open_head);
+               spin_unlock(&med->med_open_lock);
+               repbody->handle.cookie = mfd->mfd_handle.h_cookie;
+       }
 
-        mdt_object_put(mti->mti_env, obj);
+       mdt_object_put(mti->mti_env, obj);
 }
 
 typedef void (*mdt_reconstructor)(struct mdt_thread_info *mti,
index c36ea7a..51ef90e 100644
@@ -121,19 +121,19 @@ static int mdt_version_check(struct ptlrpc_request *req,
         /** Sanity check for malformed buffers */
         if (pre_ver == NULL) {
                 CERROR("No versions in request buffer\n");
-                cfs_spin_lock(&req->rq_export->exp_lock);
-                req->rq_export->exp_vbr_failed = 1;
-                cfs_spin_unlock(&req->rq_export->exp_lock);
-                RETURN(-EOVERFLOW);
-        } else if (pre_ver[idx] != version) {
-                CDEBUG(D_INODE, "Version mismatch "LPX64" != "LPX64"\n",
-                       pre_ver[idx], version);
-                cfs_spin_lock(&req->rq_export->exp_lock);
-                req->rq_export->exp_vbr_failed = 1;
-                cfs_spin_unlock(&req->rq_export->exp_lock);
-                RETURN(-EOVERFLOW);
-        }
-        RETURN(0);
+               spin_lock(&req->rq_export->exp_lock);
+               req->rq_export->exp_vbr_failed = 1;
+               spin_unlock(&req->rq_export->exp_lock);
+               RETURN(-EOVERFLOW);
+       } else if (pre_ver[idx] != version) {
+               CDEBUG(D_INODE, "Version mismatch "LPX64" != "LPX64"\n",
+                      pre_ver[idx], version);
+               spin_lock(&req->rq_export->exp_lock);
+               req->rq_export->exp_vbr_failed = 1;
+               spin_unlock(&req->rq_export->exp_lock);
+               RETURN(-EOVERFLOW);
+       }
+       RETURN(0);
 }
 
 /**
@@ -466,9 +466,9 @@ static int mdt_reint_setattr(struct mdt_thread_info *info,
                 mfd->mfd_object = mo;
                 mfd->mfd_xid = req->rq_xid;
 
-                cfs_spin_lock(&med->med_open_lock);
-                cfs_list_add(&mfd->mfd_list, &med->med_open_head);
-                cfs_spin_unlock(&med->med_open_lock);
+               spin_lock(&med->med_open_lock);
+               cfs_list_add(&mfd->mfd_list, &med->med_open_head);
+               spin_unlock(&med->med_open_lock);
                 repbody->handle.cookie = mfd->mfd_handle.h_cookie;
         }
 
@@ -479,10 +479,10 @@ static int mdt_reint_setattr(struct mdt_thread_info *info,
                 LASSERT(mdt_conn_flags(info) & OBD_CONNECT_SOM);
                 LASSERT(info->mti_ioepoch);
 
-                cfs_spin_lock(&med->med_open_lock);
-                mfd = mdt_handle2mfd(info, &info->mti_ioepoch->handle);
-                if (mfd == NULL) {
-                        cfs_spin_unlock(&med->med_open_lock);
+               spin_lock(&med->med_open_lock);
+               mfd = mdt_handle2mfd(info, &info->mti_ioepoch->handle);
+               if (mfd == NULL) {
+                       spin_unlock(&med->med_open_lock);
                         CDEBUG(D_INODE, "no handle for file close: "
                                "fid = "DFID": cookie = "LPX64"\n",
                                PFID(info->mti_rr.rr_fid1),
@@ -494,7 +494,7 @@ static int mdt_reint_setattr(struct mdt_thread_info *info,
 
                 class_handle_unhash(&mfd->mfd_handle);
                 cfs_list_del_init(&mfd->mfd_list);
-                cfs_spin_unlock(&med->med_open_lock);
+               spin_unlock(&med->med_open_lock);
 
                 mdt_mfd_close(info, mfd);
        } else if ((ma->ma_valid & MA_INODE) && ma->ma_attr.la_valid) {
index e293fa0..e4ed97c 100644 (file)
@@ -142,7 +142,7 @@ static void config_log_put(struct config_llog_data *cld)
         /* spinlock to make sure no item with 0 refcount in the list */
         if (cfs_atomic_dec_and_lock(&cld->cld_refcount, &config_list_lock)) {
                 cfs_list_del(&cld->cld_list_chain);
-                cfs_spin_unlock(&config_list_lock);
+               spin_unlock(&config_list_lock);
 
                 CDEBUG(D_MGC, "dropping config log %s\n", cld->cld_logname);
 
@@ -173,7 +173,7 @@ struct config_llog_data *config_log_find(char *logname,
         LASSERT(logname != NULL);
 
         instance = cfg ? cfg->cfg_instance : NULL;
-        cfs_spin_lock(&config_list_lock);
+       spin_lock(&config_list_lock);
         cfs_list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
                 /* check if instance equals */
                 if (instance != cld->cld_cfg.cfg_instance)
@@ -189,8 +189,8 @@ struct config_llog_data *config_log_find(char *logname,
                 cfs_atomic_inc(&found->cld_refcount);
                 LASSERT(found->cld_stopping == 0 || cld_is_sptlrpc(found) == 0);
         }
-        cfs_spin_unlock(&config_list_lock);
-        RETURN(found);
+       spin_unlock(&config_list_lock);
+       RETURN(found);
 }
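
The find/put pair above is the standard refcount-with-list pattern: config_log_find() bumps the count while holding config_list_lock, and config_log_put() drops it via atomic_dec_and_lock(), which acquires the spinlock only when the count reaches zero. That is what keeps the invariant in the comment above: nothing is ever visible on the list with a zero refcount. A condensed sketch, assuming hypothetical entry/entries names:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct entry {
        atomic_t                ref;
        struct list_head        chain;
};

static LIST_HEAD(entries);
static DEFINE_SPINLOCK(entries_lock);

static void entry_get(struct entry *e)
{
        atomic_inc(&e->ref);            /* caller already holds a ref */
}

/* Drop a reference; the lock is taken only on the final put, so the
 * unlink and the 0->free transition are atomic with respect to any
 * concurrent find that holds entries_lock. */
static void entry_put(struct entry *e)
{
        if (atomic_dec_and_lock(&e->ref, &entries_lock)) {
                list_del(&e->chain);
                spin_unlock(&entries_lock);
                kfree(e);
        }
}
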
 
 static
@@ -216,7 +216,7 @@ struct config_llog_data *do_config_log_add(struct obd_device *obd,
                 cld->cld_cfg = *cfg;
        else
                cld->cld_cfg.cfg_callback = class_config_llog_handler;
-        cfs_mutex_init(&cld->cld_lock);
+       mutex_init(&cld->cld_lock);
         cld->cld_cfg.cfg_last_idx = 0;
         cld->cld_cfg.cfg_flags = 0;
         cld->cld_cfg.cfg_sb = sb;
@@ -233,9 +233,9 @@ struct config_llog_data *do_config_log_add(struct obd_device *obd,
 
         rc = mgc_logname2resid(logname, &cld->cld_resid, type);
 
-        cfs_spin_lock(&config_list_lock);
-        cfs_list_add(&cld->cld_list_chain, &config_llog_list);
-        cfs_spin_unlock(&config_list_lock);
+       spin_lock(&config_list_lock);
+       cfs_list_add(&cld->cld_list_chain, &config_llog_list);
+       spin_unlock(&config_list_lock);
 
         if (rc) {
                 config_log_put(cld);
@@ -351,7 +351,7 @@ static int config_log_add(struct obd_device *obd, char *logname,
         RETURN(0);
 }
 
-CFS_DEFINE_MUTEX(llog_process_lock);
+DEFINE_MUTEX(llog_process_lock);
 
 /** Stop watching for updates on this log.
  */
@@ -367,7 +367,7 @@ static int config_log_end(char *logname, struct config_llog_instance *cfg)
         if (cld == NULL)
                 RETURN(-ENOENT);
 
-        cfs_mutex_lock(&cld->cld_lock);
+       mutex_lock(&cld->cld_lock);
         /*
          * if cld_stopping is set, it means we didn't start the log thus
          * not owning the start ref. this can happen after previous umount:
@@ -376,7 +376,7 @@ static int config_log_end(char *logname, struct config_llog_instance *cfg)
          * calling start_log.
          */
         if (unlikely(cld->cld_stopping)) {
-                cfs_mutex_unlock(&cld->cld_lock);
+               mutex_unlock(&cld->cld_lock);
                 /* drop the ref from the find */
                 config_log_put(cld);
                 RETURN(rc);
@@ -386,19 +386,19 @@ static int config_log_end(char *logname, struct config_llog_instance *cfg)
 
         cld_recover = cld->cld_recover;
         cld->cld_recover = NULL;
-        cfs_mutex_unlock(&cld->cld_lock);
+       mutex_unlock(&cld->cld_lock);
 
-        if (cld_recover) {
-                cfs_mutex_lock(&cld_recover->cld_lock);
-                cld_recover->cld_stopping = 1;
-                cfs_mutex_unlock(&cld_recover->cld_lock);
-                config_log_put(cld_recover);
-        }
+       if (cld_recover) {
+               mutex_lock(&cld_recover->cld_lock);
+               cld_recover->cld_stopping = 1;
+               mutex_unlock(&cld_recover->cld_lock);
+               config_log_put(cld_recover);
+       }
 
-        cfs_spin_lock(&config_list_lock);
-        cld_sptlrpc = cld->cld_sptlrpc;
-        cld->cld_sptlrpc = NULL;
-        cfs_spin_unlock(&config_list_lock);
+       spin_lock(&config_list_lock);
+       cld_sptlrpc = cld->cld_sptlrpc;
+       cld->cld_sptlrpc = NULL;
+       spin_unlock(&config_list_lock);
 
         if (cld_sptlrpc)
                 config_log_put(cld_sptlrpc);
@@ -427,18 +427,18 @@ int lprocfs_mgc_rd_ir_state(char *page, char **start, off_t off,
                      OCD_HAS_FLAG(ocd, IMP_RECOV) ? "ENABLED" : "DISABLED");
         rc += snprintf(page + rc, count - rc, "client_state:\n");
 
-        cfs_spin_lock(&config_list_lock);
-        cfs_list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
-                if (cld->cld_recover == NULL)
-                        continue;
-                rc += snprintf(page + rc, count - rc,
-                               "    - { client: %s, nidtbl_version: %u }\n",
-                               cld->cld_logname,
-                               cld->cld_recover->cld_cfg.cfg_last_idx);
-        }
-        cfs_spin_unlock(&config_list_lock);
+       spin_lock(&config_list_lock);
+       cfs_list_for_each_entry(cld, &config_llog_list, cld_list_chain) {
+               if (cld->cld_recover == NULL)
+                       continue;
+               rc += snprintf(page + rc, count - rc,
+                              "    - { client: %s, nidtbl_version: %u }\n",
+                              cld->cld_logname,
+                              cld->cld_recover->cld_cfg.cfg_last_idx);
+       }
+       spin_unlock(&config_list_lock);
 
-        RETURN(rc);
+       RETURN(rc);
 }
 
 /* reenqueue any lost locks */
@@ -448,7 +448,7 @@ int lprocfs_mgc_rd_ir_state(char *page, char **start, off_t off,
 #define RQ_STOP    0x8
 static int                    rq_state = 0;
 static cfs_waitq_t            rq_waitq;
-static CFS_DECLARE_COMPLETION(rq_exit);
+static DECLARE_COMPLETION(rq_exit);
 
 static void do_requeue(struct config_llog_data *cld)
 {
@@ -458,7 +458,7 @@ static void do_requeue(struct config_llog_data *cld)
         /* Do not run mgc_process_log on a disconnected export or an
            export which is being disconnected. Take the client
            semaphore to make the check non-racy. */
-        cfs_down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
+       down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
         if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
                 CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname);
                 mgc_process_log(cld->cld_mgcexp->exp_obd, cld);
@@ -466,7 +466,7 @@ static void do_requeue(struct config_llog_data *cld)
                 CDEBUG(D_MGC, "disconnecting, won't update log %s\n",
                        cld->cld_logname);
         }
-        cfs_up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
+       up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
 
         EXIT;
 }
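
do_requeue() only needs the read side of cl_sem: down_read()/up_read() let many requeues check connectedness concurrently while still excluding a writer that is tearing the import down. A stripped-down sketch of that read/write split; conn_sem, conn_count, and both functions are hypothetical:

#include <linux/kernel.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(conn_sem);
static int conn_count;  /* written only with conn_sem held for write */

static void do_work_if_connected(void)
{
        down_read(&conn_sem);           /* shared: readers don't block each other */
        if (conn_count != 0)
                pr_info("connected, doing work\n");
        else
                pr_info("disconnecting, won't do work\n");
        up_read(&conn_sem);
}

static void teardown(void)
{
        down_write(&conn_sem);          /* exclusive: waits out all readers */
        conn_count = 0;
        up_write(&conn_sem);
}
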
@@ -489,18 +489,18 @@ static int mgc_requeue_thread(void *data)
         CDEBUG(D_MGC, "Starting requeue thread\n");
 
         /* Keep trying failed locks periodically */
-        cfs_spin_lock(&config_list_lock);
-        rq_state |= RQ_RUNNING;
-        while (1) {
-                struct l_wait_info lwi;
-                struct config_llog_data *cld, *cld_prev;
-                int rand = cfs_rand() & MGC_TIMEOUT_RAND_CENTISEC;
-                int stopped = !!(rq_state & RQ_STOP);
-                int to;
-
-                /* Any new or requeued lostlocks will change the state */
-                rq_state &= ~(RQ_NOW | RQ_LATER);
-                cfs_spin_unlock(&config_list_lock);
+       spin_lock(&config_list_lock);
+       rq_state |= RQ_RUNNING;
+       while (1) {
+               struct l_wait_info lwi;
+               struct config_llog_data *cld, *cld_prev;
+               int rand = cfs_rand() & MGC_TIMEOUT_RAND_CENTISEC;
+               int stopped = !!(rq_state & RQ_STOP);
+               int to;
+
+               /* Any new or requeued lostlocks will change the state */
+               rq_state &= ~(RQ_NOW | RQ_LATER);
+               spin_unlock(&config_list_lock);
 
                 /* Always wait a few seconds to allow the server who
                    caused the lock revocation to finish its setup, plus some
@@ -520,13 +520,13 @@ static int mgc_requeue_thread(void *data)
                  */
                 cld_prev = NULL;
 
-                cfs_spin_lock(&config_list_lock);
-                cfs_list_for_each_entry(cld, &config_llog_list,
-                                        cld_list_chain) {
-                        if (!cld->cld_lostlock)
-                                continue;
+               spin_lock(&config_list_lock);
+               cfs_list_for_each_entry(cld, &config_llog_list,
+                                       cld_list_chain) {
+                       if (!cld->cld_lostlock)
+                               continue;
 
-                        cfs_spin_unlock(&config_list_lock);
+                       spin_unlock(&config_list_lock);
 
                         LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
 
@@ -540,33 +540,33 @@ static int mgc_requeue_thread(void *data)
                         if (likely(!stopped))
                                 do_requeue(cld);
 
-                        cfs_spin_lock(&config_list_lock);
-                }
-                cfs_spin_unlock(&config_list_lock);
-                if (cld_prev)
-                        config_log_put(cld_prev);
-
-                /* break after scanning the list so that we can drop
-                 * refcount to losing lock clds */
-                if (unlikely(stopped)) {
-                        cfs_spin_lock(&config_list_lock);
-                        break;
-                }
+                       spin_lock(&config_list_lock);
+               }
+               spin_unlock(&config_list_lock);
+               if (cld_prev)
+                       config_log_put(cld_prev);
+
+               /* break after scanning the list so that we can drop
+                * refcount to losing lock clds */
+               if (unlikely(stopped)) {
+                       spin_lock(&config_list_lock);
+                       break;
+               }
 
-                /* Wait a bit to see if anyone else needs a requeue */
-                lwi = (struct l_wait_info) { 0 };
-                l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP),
-                             &lwi);
-                cfs_spin_lock(&config_list_lock);
-        }
-        /* spinlock and while guarantee RQ_NOW and RQ_LATER are not set */
-        rq_state &= ~RQ_RUNNING;
-        cfs_spin_unlock(&config_list_lock);
+               /* Wait a bit to see if anyone else needs a requeue */
+               lwi = (struct l_wait_info) { 0 };
+               l_wait_event(rq_waitq, rq_state & (RQ_NOW | RQ_STOP),
+                            &lwi);
+               spin_lock(&config_list_lock);
+       }
+       /* spinlock and while guarantee RQ_NOW and RQ_LATER are not set */
+       rq_state &= ~RQ_RUNNING;
+       spin_unlock(&config_list_lock);
 
-        cfs_complete(&rq_exit);
+       complete(&rq_exit);
 
-        CDEBUG(D_MGC, "Ending requeue thread\n");
-        RETURN(rc);
+       CDEBUG(D_MGC, "Ending requeue thread\n");
+       RETURN(rc);
 }
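
The thread's exit path is a compact handshake: the stopper sets RQ_STOP under config_list_lock (see mgc_precleanup below), the loop notices it, RQ_RUNNING is cleared under the same lock, and complete(&rq_exit) releases the stopper's wait_for_completion(). A minimal sketch of that handshake with hypothetical state names:

#include <linux/completion.h>
#include <linux/spinlock.h>

#define ST_RUNNING      0x1
#define ST_STOP         0x2

static DEFINE_SPINLOCK(state_lock);
static unsigned int state;
static DECLARE_COMPLETION(thread_exit);

/* Thread side: on seeing ST_STOP, clear RUNNING and wake the stopper. */
static void worker_shutdown(void)
{
        spin_lock(&state_lock);
        state &= ~ST_RUNNING;
        spin_unlock(&state_lock);
        complete(&thread_exit);
}

/* Stopper side: request the stop, then block until the thread is gone. */
static void stop_worker(void)
{
        int running;

        spin_lock(&state_lock);
        running = state & ST_RUNNING;
        if (running)
                state |= ST_STOP;
        spin_unlock(&state_lock);

        if (running)
                wait_for_completion(&thread_exit);
}
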
 
 /* Add a cld to the list to requeue.  Start the requeue thread if needed.
@@ -580,28 +580,28 @@ static void mgc_requeue_add(struct config_llog_data *cld)
                cld->cld_stopping, rq_state);
         LASSERT(cfs_atomic_read(&cld->cld_refcount) > 0);
 
-        cfs_mutex_lock(&cld->cld_lock);
-        if (cld->cld_stopping || cld->cld_lostlock) {
-                cfs_mutex_unlock(&cld->cld_lock);
-                RETURN_EXIT;
-        }
-        /* this refcount will be released in mgc_requeue_thread. */
-        config_log_get(cld);
-        cld->cld_lostlock = 1;
-        cfs_mutex_unlock(&cld->cld_lock);
-
-        /* Hold lock for rq_state */
-        cfs_spin_lock(&config_list_lock);
-        if (rq_state & RQ_STOP) {
-                cfs_spin_unlock(&config_list_lock);
-                cld->cld_lostlock = 0;
-                config_log_put(cld);
-        } else {
-                rq_state |= RQ_NOW;
-                cfs_spin_unlock(&config_list_lock);
-                cfs_waitq_signal(&rq_waitq);
-        }
-        EXIT;
+       mutex_lock(&cld->cld_lock);
+       if (cld->cld_stopping || cld->cld_lostlock) {
+               mutex_unlock(&cld->cld_lock);
+               RETURN_EXIT;
+       }
+       /* this refcount will be released in mgc_requeue_thread. */
+       config_log_get(cld);
+       cld->cld_lostlock = 1;
+       mutex_unlock(&cld->cld_lock);
+
+       /* Hold lock for rq_state */
+       spin_lock(&config_list_lock);
+       if (rq_state & RQ_STOP) {
+               spin_unlock(&config_list_lock);
+               cld->cld_lostlock = 0;
+               config_log_put(cld);
+       } else {
+               rq_state |= RQ_NOW;
+               spin_unlock(&config_list_lock);
+               cfs_waitq_signal(&rq_waitq);
+       }
+       EXIT;
 }
 
 /********************** class fns **********************/
@@ -621,15 +621,15 @@ static int mgc_fs_setup(struct obd_device *obd, struct super_block *sb,
         LASSERT(lsi->lsi_srv_mnt == mnt);
 
         /* The mgc fs exclusion sem. Only one fs can be setup at a time. */
-        cfs_down(&cli->cl_mgc_sem);
+       down(&cli->cl_mgc_sem);
 
         cfs_cleanup_group_info();
 
        obd->obd_fsops = fsfilt_get_ops(lsi->lsi_fstype);
         if (IS_ERR(obd->obd_fsops)) {
-                cfs_up(&cli->cl_mgc_sem);
-               CERROR("No fstype %s rc=%ld\n", lsi->lsi_fstype,
-                       PTR_ERR(obd->obd_fsops));
+               up(&cli->cl_mgc_sem);
+               CERROR("%s: No fstype %s: rc = %ld\n", obd->obd_name,
+                      lsi->lsi_fstype, PTR_ERR(obd->obd_fsops));
                 RETURN(PTR_ERR(obd->obd_fsops));
         }
 
@@ -670,7 +670,7 @@ err_ops:
         fsfilt_put_ops(obd->obd_fsops);
         obd->obd_fsops = NULL;
         cli->cl_mgc_vfsmnt = NULL;
-        cfs_up(&cli->cl_mgc_sem);
+       up(&cli->cl_mgc_sem);
         RETURN(err);
 }
 
@@ -695,7 +695,7 @@ static int mgc_fs_cleanup(struct obd_device *obd)
         if (obd->obd_fsops)
                 fsfilt_put_ops(obd->obd_fsops);
 
-        cfs_up(&cli->cl_mgc_sem);
+       up(&cli->cl_mgc_sem);
 
         RETURN(rc);
 }
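
cl_mgc_sem is one of the few primitives that stays a semaphore rather than becoming a mutex: it is taken in mgc_fs_setup() and only released in mgc_fs_cleanup() or on the setup error path, and a kernel mutex must be unlocked by the task that locked it, which a hold spanning setup and cleanup cannot guarantee. A sketch of the pattern; fs_sem and the two functions are hypothetical, and DEFINE_SEMAPHORE() initializes the count to one in kernels of this vintage:

#include <linux/semaphore.h>

/* Binary semaphore: only one filesystem setup at a time. */
static DEFINE_SEMAPHORE(fs_sem);

static int fs_setup(void)
{
        down(&fs_sem);          /* blocks until the previous user is done */
        /* ... mount and configure; on error, up(&fs_sem) and bail ... */
        return 0;               /* deliberately still held at return */
}

static void fs_cleanup(void)
{
        /* ... undo the setup ... */
        up(&fs_sem);            /* may run in a different task than down() */
}
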
@@ -713,14 +713,14 @@ static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
                 if (cfs_atomic_dec_and_test(&mgc_count)) {
                         int running;
                         /* stop requeue thread */
-                        cfs_spin_lock(&config_list_lock);
-                        running = rq_state & RQ_RUNNING;
-                        if (running)
-                                rq_state |= RQ_STOP;
-                        cfs_spin_unlock(&config_list_lock);
-                        if (running) {
-                                cfs_waitq_signal(&rq_waitq);
-                                cfs_wait_for_completion(&rq_exit);
+                       spin_lock(&config_list_lock);
+                       running = rq_state & RQ_RUNNING;
+                       if (running)
+                               rq_state |= RQ_STOP;
+                       spin_unlock(&config_list_lock);
+                       if (running) {
+                               cfs_waitq_signal(&rq_waitq);
+                               wait_for_completion(&rq_exit);
                         }
                 }
                 obd_cleanup_client_import(obd);
@@ -957,13 +957,13 @@ static int mgc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
 
 static void mgc_notify_active(struct obd_device *unused)
 {
-        /* wakeup mgc_requeue_thread to requeue mgc lock */
-        cfs_spin_lock(&config_list_lock);
-        rq_state |= RQ_NOW;
-        cfs_spin_unlock(&config_list_lock);
-        cfs_waitq_signal(&rq_waitq);
+       /* wakeup mgc_requeue_thread to requeue mgc lock */
+       spin_lock(&config_list_lock);
+       rq_state |= RQ_NOW;
+       spin_unlock(&config_list_lock);
+       cfs_waitq_signal(&rq_waitq);
 
-        /* TODO: Help the MGS rebuild nidtbl. -jay */
+       /* TODO: Help the MGS rebuild nidtbl. -jay */
 }
 
 /* Send target_reg message to MGS */
@@ -1371,10 +1371,10 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
                 pos += sprintf(params, "%s.import=%s", cname, "connection=");
                 uuid = buf + pos;
 
-               cfs_down_read(&obd->u.cli.cl_sem);
+               down_read(&obd->u.cli.cl_sem);
                if (obd->u.cli.cl_import == NULL) {
                        /* client does not connect to the OST yet */
-                       cfs_up_read(&obd->u.cli.cl_sem);
+                       up_read(&obd->u.cli.cl_sem);
                        rc = 0;
                        continue;
                }
@@ -1384,7 +1384,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
                 rc = client_import_find_conn(obd->u.cli.cl_import,
                                              entry->u.nids[0],
                                              (struct obd_uuid *)uuid);
-               cfs_up_read(&obd->u.cli.cl_sem);
+               up_read(&obd->u.cli.cl_sem);
                 if (rc < 0) {
                         CERROR("mgc: cannot find uuid by nid %s\n",
                                libcfs_nid2str(entry->u.nids[0]));
@@ -1466,7 +1466,7 @@ static int mgc_process_recover_log(struct obd_device *obd,
 
 again:
         LASSERT(cld_is_recover(cld));
-        LASSERT(cfs_mutex_is_locked(&cld->cld_lock));
+       LASSERT(mutex_is_locked(&cld->cld_lock));
         req = ptlrpc_request_alloc(class_exp2cliimp(cld->cld_mgcexp),
                                    &RQF_MGS_CONFIG_READ);
         if (req == NULL)
@@ -1720,7 +1720,7 @@ static int mgc_process_cfg_log(struct obd_device *mgc,
         ENTRY;
 
         LASSERT(cld);
-        LASSERT(cfs_mutex_is_locked(&cld->cld_lock));
+       LASSERT(mutex_is_locked(&cld->cld_lock));
 
         /*
          * local copy of sptlrpc log is controlled elsewhere, don't try to
@@ -1835,9 +1835,9 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
            sounds like badness.  It actually might be fine, as long as
            we're not trying to update from the same log
            simultaneously (in which case we should use a per-log sem.) */
-        cfs_mutex_lock(&cld->cld_lock);
-        if (cld->cld_stopping) {
-                cfs_mutex_unlock(&cld->cld_lock);
+       mutex_lock(&cld->cld_lock);
+       if (cld->cld_stopping) {
+               mutex_unlock(&cld->cld_lock);
                 RETURN(0);
         }
 
@@ -1877,7 +1877,7 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
         CDEBUG(D_MGC, "%s: configuration from log '%s' %sed (%d).\n",
                mgc->obd_name, cld->cld_logname, rc ? "fail" : "succeed", rc);
 
-        cfs_mutex_unlock(&cld->cld_lock);
+       mutex_unlock(&cld->cld_lock);
 
         /* Now drop the lock so MGS can revoke it */
         if (!rcl) {
index 0f7fc26..9b0a4ad 100644 (file)
@@ -134,9 +134,9 @@ static int mgsself_srpc_seq_show(struct seq_file *seq, void *v)
         if (rc)
                goto out;
 
-        cfs_mutex_lock(&fsdb->fsdb_mutex);
+       mutex_lock(&fsdb->fsdb_mutex);
         seq_show_srpc_rules(seq, fsdb->fsdb_name, &fsdb->fsdb_srpc_gen);
-        cfs_mutex_unlock(&fsdb->fsdb_mutex);
+       mutex_unlock(&fsdb->fsdb_mutex);
 
 out:
        lu_env_fini(&env);
@@ -216,16 +216,16 @@ static int mgs_live_seq_show(struct seq_file *seq, void *v)
         struct mgs_tgt_srpc_conf *srpc_tgt;
         int i;
 
-        cfs_mutex_lock(&fsdb->fsdb_mutex);
+       mutex_lock(&fsdb->fsdb_mutex);
 
         seq_printf(seq, "fsname: %s\n", fsdb->fsdb_name);
         seq_printf(seq, "flags: %#lx     gen: %d\n",
                    fsdb->fsdb_flags, fsdb->fsdb_gen);
         for (i = 0; i < INDEX_MAP_SIZE * 8; i++)
-                 if (cfs_test_bit(i, fsdb->fsdb_mdt_index_map))
+               if (test_bit(i, fsdb->fsdb_mdt_index_map))
                          seq_printf(seq, "%s-MDT%04x\n", fsdb->fsdb_name, i);
         for (i = 0; i < INDEX_MAP_SIZE * 8; i++)
-                 if (cfs_test_bit(i, fsdb->fsdb_ost_index_map))
+               if (test_bit(i, fsdb->fsdb_ost_index_map))
                          seq_printf(seq, "%s-OST%04x\n", fsdb->fsdb_name, i);
 
         seq_printf(seq, "\nSecure RPC Config Rules:\n");
@@ -242,7 +242,7 @@ static int mgs_live_seq_show(struct seq_file *seq, void *v)
 
         lprocfs_rd_ir_state(seq, fsdb);
 
-        cfs_mutex_unlock(&fsdb->fsdb_mutex);
+       mutex_unlock(&fsdb->fsdb_mutex);
         return 0;
 }
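
The proc handler above is a plain read-side bitmap walk; after this series the cfs_ bit operations are simply the kernel's own test_bit()/set_bit()/clear_bit(). A small sketch of the same loop shape over a hypothetical index map:

#include <linux/bitops.h>
#include <linux/kernel.h>

#define MAP_SIZE        32      /* bytes, analogous to INDEX_MAP_SIZE above */

static unsigned long index_map[MAP_SIZE / sizeof(unsigned long)];

/* Report every index currently marked in the bitmap. */
static void dump_indices(const char *name)
{
        int i;

        for (i = 0; i < MAP_SIZE * 8; i++)
                if (test_bit(i, index_map))
                        printk(KERN_INFO "%s-%04x\n", name, i);
}
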
 
index 6e424d4..ce6ada3 100644 (file)
@@ -142,7 +142,7 @@ static int mgs_completion_ast_config(struct ldlm_lock *lock, __u64 flags,
                 struct lustre_handle lockh;
 
                 /* clear the bit before lock put */
-                cfs_clear_bit(FSDB_REVOKING_LOCK, &fsdb->fsdb_flags);
+               clear_bit(FSDB_REVOKING_LOCK, &fsdb->fsdb_flags);
 
                 ldlm_lock2handle(lock, &lockh);
                 ldlm_lock_decref_and_cancel(&lockh, LCK_EX);
@@ -196,7 +196,7 @@ void mgs_revoke_lock(struct mgs_device *mgs, struct fs_db *fsdb, int type)
         switch (type) {
         case CONFIG_T_CONFIG:
                 cp = mgs_completion_ast_config;
-                if (cfs_test_and_set_bit(FSDB_REVOKING_LOCK, &fsdb->fsdb_flags))
+               if (test_and_set_bit(FSDB_REVOKING_LOCK, &fsdb->fsdb_flags))
                         rc = -EALREADY;
                 break;
         case CONFIG_T_RECOVER:
@@ -217,7 +217,7 @@ void mgs_revoke_lock(struct mgs_device *mgs, struct fs_db *fsdb, int type)
                                le64_to_cpu(res_id.name[1]), rc);
 
                         if (type == CONFIG_T_CONFIG)
-                                cfs_clear_bit(FSDB_REVOKING_LOCK,
+                               clear_bit(FSDB_REVOKING_LOCK,
                                               &fsdb->fsdb_flags);
                 }
                 /* lock has been cancelled in completion_ast. */
@@ -506,17 +506,17 @@ static int mgs_connect_check_sptlrpc(struct ptlrpc_request *req)
                 if (rc)
                         return rc;
 
-                cfs_mutex_lock(&fsdb->fsdb_mutex);
-                if (sptlrpc_rule_set_choose(&fsdb->fsdb_srpc_gen,
-                                            LUSTRE_SP_MGC, LUSTRE_SP_MGS,
-                                            req->rq_peer.nid,
-                                            &flvr) == 0) {
-                        /* by defualt allow any flavors */
-                        flvr.sf_rpc = SPTLRPC_FLVR_ANY;
-                }
-                cfs_mutex_unlock(&fsdb->fsdb_mutex);
+               mutex_lock(&fsdb->fsdb_mutex);
+               if (sptlrpc_rule_set_choose(&fsdb->fsdb_srpc_gen,
+                                           LUSTRE_SP_MGC, LUSTRE_SP_MGS,
+                                           req->rq_peer.nid,
+                                           &flvr) == 0) {
+                       /* by default allow any flavors */
+                       flvr.sf_rpc = SPTLRPC_FLVR_ANY;
+               }
+               mutex_unlock(&fsdb->fsdb_mutex);
 
-                cfs_spin_lock(&exp->exp_lock);
+               spin_lock(&exp->exp_lock);
 
                 exp->exp_sp_peer = req->rq_sp_from;
                 exp->exp_flvr = flvr;
@@ -529,7 +529,7 @@ static int mgs_connect_check_sptlrpc(struct ptlrpc_request *req)
                         rc = -EACCES;
                 }
 
-                cfs_spin_unlock(&exp->exp_lock);
+               spin_unlock(&exp->exp_lock);
         } else {
                 if (exp->exp_sp_peer != req->rq_sp_from) {
                         CERROR("RPC source %s doesn't match %s\n",
@@ -751,15 +751,15 @@ out:
 
 static inline int mgs_init_export(struct obd_export *exp)
 {
-        struct mgs_export_data *data = &exp->u.eu_mgs_data;
+       struct mgs_export_data *data = &exp->u.eu_mgs_data;
 
-        /* init mgs_export_data for fsc */
-        cfs_spin_lock_init(&data->med_lock);
-        CFS_INIT_LIST_HEAD(&data->med_clients);
+       /* init mgs_export_data for fsc */
+       spin_lock_init(&data->med_lock);
+       CFS_INIT_LIST_HEAD(&data->med_clients);
 
-        cfs_spin_lock(&exp->exp_lock);
-        exp->exp_connecting = 1;
-        cfs_spin_unlock(&exp->exp_lock);
+       spin_lock(&exp->exp_lock);
+       exp->exp_connecting = 1;
+       spin_unlock(&exp->exp_lock);
 
         /* self-export doesn't need client data and ldlm initialization */
         if (unlikely(obd_uuid_equals(&exp->exp_obd->obd_uuid,
@@ -1069,9 +1069,9 @@ static int mgs_init0(const struct lu_env *env, struct mgs_device *mgs,
 
        /* Internal mgs setup */
        mgs_init_fsdb_list(mgs);
-       cfs_mutex_init(&mgs->mgs_mutex);
+       mutex_init(&mgs->mgs_mutex);
        mgs->mgs_start_time = cfs_time_current_sec();
-       cfs_spin_lock_init(&mgs->mgs_lock);
+       spin_lock_init(&mgs->mgs_lock);
 
        /* Setup proc */
        lprocfs_mgs_init_vars(&lvars);
index 9ad6174..89a9261 100644 (file)
@@ -97,7 +97,7 @@ struct mgs_fsc {
 struct mgs_nidtbl {
         struct fs_db *mn_fsdb;
         struct file  *mn_version_file;
-        cfs_mutex_t   mn_lock;
+       struct mutex    mn_lock;
         u64           mn_version;
         int           mn_nr_targets;
         cfs_list_t    mn_targets;
@@ -121,7 +121,7 @@ struct mgs_tgt_srpc_conf {
 struct fs_db {
         char              fsdb_name[9];
         cfs_list_t        fsdb_list;           /* list of databases */
-        cfs_mutex_t       fsdb_mutex;
+       struct mutex      fsdb_mutex;
         void             *fsdb_ost_index_map;  /* bitmap of used indicies */
         void             *fsdb_mdt_index_map;  /* bitmap of used indicies */
         int               fsdb_mdt_count;
@@ -145,7 +145,7 @@ struct fs_db {
         /* async thread to notify clients */
        struct mgs_device   *fsdb_mgs;
         cfs_waitq_t          fsdb_notify_waitq;
-        cfs_completion_t     fsdb_notify_comp;
+       struct completion       fsdb_notify_comp;
         cfs_time_t           fsdb_notify_start;
         cfs_atomic_t         fsdb_notify_phase;
         volatile int         fsdb_notify_async:1,
@@ -164,13 +164,13 @@ struct mgs_device {
        struct dt_object                *mgs_configs_dir;
        struct dt_object                *mgs_nidtbl_dir;
        cfs_list_t                       mgs_fs_db_list;
-       cfs_spinlock_t                   mgs_lock; /* covers mgs_fs_db_list */
+       spinlock_t                       mgs_lock; /* covers mgs_fs_db_list */
        cfs_proc_dir_entry_t            *mgs_proc_live;
        cfs_proc_dir_entry_t            *mgs_proc_mntdev;
        cfs_time_t                       mgs_start_time;
        struct obd_device               *mgs_obd;
        struct local_oid_storage        *mgs_los;
-       cfs_mutex_t                      mgs_mutex;
+       struct mutex                     mgs_mutex;
 };
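
The header hunks are the type half of the conversion: cfs_mutex_t, cfs_spinlock_t, and cfs_completion_t become struct mutex, spinlock_t, and struct completion, each initialized with the matching kernel helper as mgs_init0() above shows. A self-contained sketch of the embedding pattern; demo_device and its fields are hypothetical:

#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

struct demo_device {
        spinlock_t              d_lock;         /* covers short lists */
        struct mutex            d_mutex;        /* serializes slow paths */
        struct completion       d_ready;        /* startup handshake */
};

static void demo_device_init(struct demo_device *d)
{
        spin_lock_init(&d->d_lock);
        mutex_init(&d->d_mutex);
        init_completion(&d->d_ready);
}
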
 
 /* this is a top object */
index acc2dc9..d20b59e 100644 (file)
@@ -198,7 +198,7 @@ static int mgs_fsdb_handler(const struct lu_env *env, struct llog_handle *llh,
                 CDEBUG(D_MGS, "OST index for %s is %u (%s)\n",
                        lustre_cfg_string(lcfg, 1), index,
                        lustre_cfg_string(lcfg, 2));
-                cfs_set_bit(index, fsdb->fsdb_ost_index_map);
+               set_bit(index, fsdb->fsdb_ost_index_map);
         }
 
         /* Figure out mdt indicies */
@@ -214,7 +214,7 @@ static int mgs_fsdb_handler(const struct lu_env *env, struct llog_handle *llh,
                 }
                 rc = 0;
                 CDEBUG(D_MGS, "MDT index is %u\n", index);
-                cfs_set_bit(index, fsdb->fsdb_mdt_index_map);
+               set_bit(index, fsdb->fsdb_mdt_index_map);
                 fsdb->fsdb_mdt_count ++;
         }
 
@@ -230,13 +230,13 @@ static int mgs_fsdb_handler(const struct lu_env *env, struct llog_handle *llh,
         /*
          * compat to 1.8, check osc name used by MDT0 to OSTs, bz18548.
          */
-        if (!cfs_test_bit(FSDB_OSCNAME18, &fsdb->fsdb_flags) &&
+       if (!test_bit(FSDB_OSCNAME18, &fsdb->fsdb_flags) &&
             lcfg->lcfg_command == LCFG_ATTACH &&
             strcmp(lustre_cfg_string(lcfg, 1), LUSTRE_OSC_NAME) == 0) {
                 if (OBD_OCD_VERSION_MAJOR(d->ver) == 1 &&
                     OBD_OCD_VERSION_MINOR(d->ver) <= 8) {
                         CWARN("MDT using 1.8 OSC name scheme\n");
-                        cfs_set_bit(FSDB_OSCNAME18, &fsdb->fsdb_flags);
+                       set_bit(FSDB_OSCNAME18, &fsdb->fsdb_flags);
                 }
         }
 
@@ -280,7 +280,7 @@ static int mgs_get_fsdb_from_llog(const struct lu_env *env,
                GOTO(out_close, rc);
 
        if (llog_get_size(loghandle) <= 1)
-               cfs_set_bit(FSDB_LOG_EMPTY, &fsdb->fsdb_flags);
+               set_bit(FSDB_LOG_EMPTY, &fsdb->fsdb_flags);
 
        rc = llog_process(env, loghandle, mgs_fsdb_handler, (void *)&d, NULL);
        CDEBUG(D_INFO, "get_db = %d\n", rc);
@@ -345,12 +345,12 @@ static struct fs_db *mgs_new_fsdb(const struct lu_env *env,
                 RETURN(NULL);
 
         strcpy(fsdb->fsdb_name, fsname);
-        cfs_mutex_init(&fsdb->fsdb_mutex);
-        cfs_set_bit(FSDB_UDESC, &fsdb->fsdb_flags);
+       mutex_init(&fsdb->fsdb_mutex);
+       set_bit(FSDB_UDESC, &fsdb->fsdb_flags);
        fsdb->fsdb_gen = 1;
 
         if (strcmp(fsname, MGSSELF_NAME) == 0) {
-                cfs_set_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags);
+               set_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags);
         } else {
                 OBD_ALLOC(fsdb->fsdb_ost_index_map, INDEX_MAP_SIZE);
                 OBD_ALLOC(fsdb->fsdb_mdt_index_map, INDEX_MAP_SIZE);
@@ -389,7 +389,7 @@ err:
 static void mgs_free_fsdb(struct mgs_device *mgs, struct fs_db *fsdb)
 {
         /* wait for anyone with the sem */
-        cfs_mutex_lock(&fsdb->fsdb_mutex);
+       mutex_lock(&fsdb->fsdb_mutex);
        lproc_mgs_del_live(mgs, fsdb);
         cfs_list_del(&fsdb->fsdb_list);
 
@@ -403,7 +403,7 @@ static void mgs_free_fsdb(struct mgs_device *mgs, struct fs_db *fsdb)
         name_destroy(&fsdb->fsdb_clilov);
         name_destroy(&fsdb->fsdb_clilmv);
         mgs_free_fsdb_srpc(fsdb);
-        cfs_mutex_unlock(&fsdb->fsdb_mutex);
+       mutex_unlock(&fsdb->fsdb_mutex);
         OBD_FREE_PTR(fsdb);
 }
 
@@ -417,12 +417,12 @@ int mgs_cleanup_fsdb_list(struct mgs_device *mgs)
 {
         struct fs_db *fsdb;
         cfs_list_t *tmp, *tmp2;
-        cfs_mutex_lock(&mgs->mgs_mutex);
+       mutex_lock(&mgs->mgs_mutex);
         cfs_list_for_each_safe(tmp, tmp2, &mgs->mgs_fs_db_list) {
                 fsdb = cfs_list_entry(tmp, struct fs_db, fsdb_list);
                mgs_free_fsdb(mgs, fsdb);
         }
-        cfs_mutex_unlock(&mgs->mgs_mutex);
+       mutex_unlock(&mgs->mgs_mutex);
         return 0;
 }
 
@@ -434,10 +434,10 @@ int mgs_find_or_make_fsdb(const struct lu_env *env,
         int rc = 0;
 
        ENTRY;
-        cfs_mutex_lock(&mgs->mgs_mutex);
+       mutex_lock(&mgs->mgs_mutex);
        fsdb = mgs_find_fsdb(mgs, name);
         if (fsdb) {
-                cfs_mutex_unlock(&mgs->mgs_mutex);
+               mutex_unlock(&mgs->mgs_mutex);
                 *dbh = fsdb;
                RETURN(0);
         }
@@ -446,12 +446,12 @@ int mgs_find_or_make_fsdb(const struct lu_env *env,
        fsdb = mgs_new_fsdb(env, mgs, name);
        /* lock fsdb_mutex until the db is loaded from llogs */
        if (fsdb)
-               cfs_mutex_lock(&fsdb->fsdb_mutex);
-        cfs_mutex_unlock(&mgs->mgs_mutex);
+               mutex_lock(&fsdb->fsdb_mutex);
+       mutex_unlock(&mgs->mgs_mutex);
         if (!fsdb)
                RETURN(-ENOMEM);
 
-        if (!cfs_test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags)) {
+       if (!test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags)) {
                 /* populate the db from the client llog */
                rc = mgs_get_fsdb_from_llog(env, mgs, fsdb);
                 if (rc) {
@@ -467,13 +467,13 @@ int mgs_find_or_make_fsdb(const struct lu_env *env,
                GOTO(out_free, rc);
         }
 
-       cfs_mutex_unlock(&fsdb->fsdb_mutex);
+       mutex_unlock(&fsdb->fsdb_mutex);
         *dbh = fsdb;
 
         RETURN(0);
 
 out_free:
-       cfs_mutex_unlock(&fsdb->fsdb_mutex);
+       mutex_unlock(&fsdb->fsdb_mutex);
        mgs_free_fsdb(mgs, fsdb);
        return rc;
 }
@@ -498,7 +498,7 @@ int mgs_check_index(const struct lu_env *env,
                 RETURN(rc);
         }
 
-        if (cfs_test_bit(FSDB_LOG_EMPTY, &fsdb->fsdb_flags))
+       if (test_bit(FSDB_LOG_EMPTY, &fsdb->fsdb_flags))
                 RETURN(-1);
 
         if (mti->mti_flags & LDD_F_SV_TYPE_OST)
@@ -508,7 +508,7 @@ int mgs_check_index(const struct lu_env *env,
         else
                 RETURN(-EINVAL);
 
-        if (cfs_test_bit(mti->mti_stripe_index, imap))
+       if (test_bit(mti->mti_stripe_index, imap))
                 RETURN(1);
         RETURN(0);
 }
@@ -517,7 +517,7 @@ static __inline__ int next_index(void *index_map, int map_len)
 {
         int i;
         for (i = 0; i < map_len * 8; i++)
-                 if (!cfs_test_bit(i, index_map)) {
+               if (!test_bit(i, index_map)) {
                          return i;
                  }
         CERROR("max index %d exceeded.\n", i);
@@ -543,7 +543,7 @@ static int mgs_set_index(const struct lu_env *env,
                 RETURN(rc);
         }
 
-       cfs_mutex_lock(&fsdb->fsdb_mutex);
+       mutex_lock(&fsdb->fsdb_mutex);
         if (mti->mti_flags & LDD_F_SV_TYPE_OST) {
                 imap = fsdb->fsdb_ost_index_map;
         } else if (mti->mti_flags & LDD_F_SV_TYPE_MDT) {
@@ -574,7 +574,7 @@ static int mgs_set_index(const struct lu_env *env,
                GOTO(out_up, rc = -ERANGE);
         }
 
-        if (cfs_test_bit(mti->mti_stripe_index, imap)) {
+       if (test_bit(mti->mti_stripe_index, imap)) {
                 if ((mti->mti_flags & LDD_F_VIRGIN) &&
                     !(mti->mti_flags & LDD_F_WRITECONF)) {
                         LCONSOLE_ERROR_MSG(0x140, "Server %s requested index "
@@ -590,9 +590,9 @@ static int mgs_set_index(const struct lu_env *env,
                 }
         }
 
-        cfs_set_bit(mti->mti_stripe_index, imap);
-        cfs_clear_bit(FSDB_LOG_EMPTY, &fsdb->fsdb_flags);
-       cfs_mutex_unlock(&fsdb->fsdb_mutex);
+       set_bit(mti->mti_stripe_index, imap);
+       clear_bit(FSDB_LOG_EMPTY, &fsdb->fsdb_flags);
+       mutex_unlock(&fsdb->fsdb_mutex);
        server_make_name(mti->mti_flags & ~(LDD_F_VIRGIN | LDD_F_WRITECONF),
                         mti->mti_stripe_index, mti->mti_fsname, mti->mti_svname);
 
@@ -601,7 +601,7 @@ static int mgs_set_index(const struct lu_env *env,
 
         RETURN(0);
 out_up:
-       cfs_mutex_unlock(&fsdb->fsdb_mutex);
+       mutex_unlock(&fsdb->fsdb_mutex);
        return rc;
 }
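
mgs_set_index() claims an index with test_bit()/set_bit() under fsdb_mutex, and next_index() just below is essentially find_first_zero_bit(), another primitive this series un-wraps. A compact allocate-under-mutex sketch with hypothetical names:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/mutex.h>

#define MAP_BITS        256

static DEFINE_MUTEX(map_mutex);
static unsigned long used_map[MAP_BITS / BITS_PER_LONG];

/* Claim the lowest free index, or -ERANGE if the map is full. */
static int index_alloc(void)
{
        int i;

        mutex_lock(&map_mutex);
        i = find_first_zero_bit(used_map, MAP_BITS);
        if (i >= MAP_BITS) {
                mutex_unlock(&map_mutex);
                return -ERANGE;
        }
        set_bit(i, used_map);
        mutex_unlock(&map_mutex);
        return i;
}
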
 
@@ -680,7 +680,7 @@ static int mgs_modify(const struct lu_env *env, struct mgs_device *mgs,
 
         ENTRY;
 
-       LASSERT(cfs_mutex_is_locked(&fsdb->fsdb_mutex));
+       LASSERT(mutex_is_locked(&fsdb->fsdb_mutex));
        CDEBUG(D_MGS, "modify %s/%s/%s fl=%x\n", logname, devname, comment,
               flags);
 
@@ -1706,7 +1706,7 @@ static int name_create_mdt_and_lov(char **logname, char **lovname,
        if (rc)
                return rc;
         /* COMPAT_180 */
-        if (i == 0 && cfs_test_bit(FSDB_OSCNAME18, &fsdb->fsdb_flags))
+       if (i == 0 && test_bit(FSDB_OSCNAME18, &fsdb->fsdb_flags))
                rc = name_create(lovname, fsdb->fsdb_name, "-mdtlov");
         else
                rc = name_create(lovname, *logname, "-mdtlov");
@@ -1722,7 +1722,7 @@ static inline int name_create_mdt_osc(char **oscname, char *ostname,
 {
         char suffix[16];
 
-        if (i == 0 && cfs_test_bit(FSDB_OSCNAME18, &fsdb->fsdb_flags))
+       if (i == 0 && test_bit(FSDB_OSCNAME18, &fsdb->fsdb_flags))
                 sprintf(suffix, "-osc");
         else
                 sprintf(suffix, "-osc-MDT%04x", i);
@@ -1814,7 +1814,7 @@ static int mgs_write_log_mdt(const struct lu_env *env,
         for (i = 0; i < INDEX_MAP_SIZE * 8; i++){
                 char *mdtname;
                 if (i !=  mti->mti_stripe_index &&
-                    cfs_test_bit(i,  fsdb->fsdb_mdt_index_map)) {
+                   test_bit(i,  fsdb->fsdb_mdt_index_map)) {
                        rc = name_create_mdt(&mdtname, mti->mti_fsname, i);
                        if (rc)
                                GOTO(out_end, rc);
@@ -1864,7 +1864,7 @@ static int mgs_write_log_osc_to_lov(const struct lu_env *env,
                GOTO(out_free, rc);
        /* for the system upgraded from old 1.8, keep using the old osc naming
         * style for mdt, see name_create_mdt_osc(). LU-1257 */
-       if (cfs_test_bit(FSDB_OSCNAME18, &fsdb->fsdb_flags))
+       if (test_bit(FSDB_OSCNAME18, &fsdb->fsdb_flags))
                rc = name_create(&oscname, svname, "");
        else
                rc = name_create(&oscname, svname, suffix);
@@ -1991,7 +1991,7 @@ out_end:
         /* We also have to update the other logs where this osc is part of
            the lov */
 
-        if (cfs_test_bit(FSDB_OLDLOG14, &fsdb->fsdb_flags)) {
+       if (test_bit(FSDB_OLDLOG14, &fsdb->fsdb_flags)) {
                 /* If we're upgrading, the old mdt log already has our
                    entry. Let's do a fake one for fun. */
                 /* Note that we can't add any new failnids, since we don't
@@ -2009,7 +2009,7 @@ out_end:
 
         /* Add ost to all MDT lov defs */
         for (i = 0; i < INDEX_MAP_SIZE * 8; i++){
-                if (cfs_test_bit(i, fsdb->fsdb_mdt_index_map)) {
+               if (test_bit(i, fsdb->fsdb_mdt_index_map)) {
                         char mdt_index[9];
 
                        rc = name_create_mdt_and_lov(&logname, &lovname, fsdb,
@@ -2147,7 +2147,7 @@ static int mgs_write_log_add_failnid(const struct lu_env *env,
                 int i;
 
                 for (i = 0; i < INDEX_MAP_SIZE * 8; i++) {
-                        if (!cfs_test_bit(i, fsdb->fsdb_mdt_index_map))
+                       if (!test_bit(i, fsdb->fsdb_mdt_index_map))
                                 continue;
                        rc = name_create_mdt(&logname, mti->mti_fsname, i);
                        if (rc)
@@ -2408,10 +2408,10 @@ static int mgs_srpc_set_param_udesc_mem(struct fs_db *fsdb,
                 goto error_out;
 
         if (strcmp(ptr, "yes") == 0) {
-                cfs_set_bit(FSDB_UDESC, &fsdb->fsdb_flags);
+               set_bit(FSDB_UDESC, &fsdb->fsdb_flags);
                 CWARN("Enable user descriptor shipping from client to MDT\n");
         } else if (strcmp(ptr, "no") == 0) {
-                cfs_clear_bit(FSDB_UDESC, &fsdb->fsdb_flags);
+               clear_bit(FSDB_UDESC, &fsdb->fsdb_flags);
                 CWARN("Disable user descriptor shipping from client to MDT\n");
         } else {
                 *(ptr - 1) = '=';
@@ -2455,7 +2455,7 @@ static int mgs_srpc_set_param_mem(struct fs_db *fsdb,
                 RETURN(rc);
 
         /* mgs rules implies must be mgc->mgs */
-        if (cfs_test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags)) {
+       if (test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags)) {
                 if ((rule.sr_from != LUSTRE_SP_MGC &&
                      rule.sr_from != LUSTRE_SP_ANY) ||
                     (rule.sr_to != LUSTRE_SP_MGS &&
@@ -2539,7 +2539,7 @@ static int mgs_srpc_set_param(const struct lu_env *env,
         if (rc)
                 goto out_free;
 
-        if (cfs_test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags)) {
+       if (test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags)) {
                 /*
                  * for mgs rules, make them effective immediately.
                  */
@@ -2769,7 +2769,7 @@ static int mgs_write_log_param(const struct lu_env *env,
                 /* Modify mdtlov */
                 /* Add to all MDT logs for CMD */
                 for (i = 0; i < INDEX_MAP_SIZE * 8; i++) {
-                        if (!cfs_test_bit(i, fsdb->fsdb_mdt_index_map))
+                       if (!test_bit(i, fsdb->fsdb_mdt_index_map))
                                 continue;
                        rc = name_create_mdt(&logname, mti->mti_fsname, i);
                        if (rc)
@@ -2787,7 +2787,7 @@ static int mgs_write_log_param(const struct lu_env *env,
                                            "changes were made to the "
                                            "config log.\n",
                                            mti->mti_svname, rc);
-                        if (cfs_test_bit(FSDB_OLDLOG14, &fsdb->fsdb_flags))
+                       if (test_bit(FSDB_OLDLOG14, &fsdb->fsdb_flags))
                                 LCONSOLE_ERROR_MSG(0x146, "This may be"
                                                    " because the log"
                                                    "is in the old 1.4"
@@ -2845,7 +2845,7 @@ static int mgs_write_log_param(const struct lu_env *env,
             (class_match_param(ptr, PARAM_LLITE, NULL) == 0)) {
                 char *cname;
 
-               if (cfs_test_bit(FSDB_OLDLOG14, &fsdb->fsdb_flags)) {
+               if (test_bit(FSDB_OLDLOG14, &fsdb->fsdb_flags)) {
                        LCONSOLE_ERROR_MSG(0x148, "Upgraded client logs for %s"
                                           " cannot be modified. Consider"
                                           " updating the configuration with"
@@ -2883,7 +2883,7 @@ static int mgs_write_log_param(const struct lu_env *env,
                         int i;
 
                         for (i = 0; i < INDEX_MAP_SIZE * 8; i++){
-                                if (!cfs_test_bit(i, fsdb->fsdb_mdt_index_map))
+                               if (!test_bit(i, fsdb->fsdb_mdt_index_map))
                                         continue;
                                 name_destroy(&cname);
                                rc = name_create_mdt_osc(&cname, mti->mti_svname,
@@ -2928,7 +2928,7 @@ static int mgs_write_log_param(const struct lu_env *env,
                         goto active_err;
                 if (rc & LDD_F_SV_ALL) {
                         for (i = 0; i < INDEX_MAP_SIZE * 8; i++) {
-                                if (!cfs_test_bit(i,
+                               if (!test_bit(i,
                                                   fsdb->fsdb_mdt_index_map))
                                         continue;
                                rc = name_create_mdt(&logname,
@@ -2997,9 +2997,9 @@ int mgs_check_failnid(const struct lu_env *env, struct mgs_device *mgs,
            the failover list.  Modify mti->params for rewriting back at
            server_register_target(). */
 
-        cfs_mutex_lock(&fsdb->fsdb_mutex);
+       mutex_lock(&fsdb->fsdb_mutex);
         rc = mgs_write_log_add_failnid(obd, fsdb, mti);
-        cfs_mutex_unlock(&fsdb->fsdb_mutex);
+       mutex_unlock(&fsdb->fsdb_mutex);
 
         RETURN(rc);
 #endif
@@ -3033,7 +3033,7 @@ int mgs_write_log_target(const struct lu_env *env,
                mti->mti_flags &= ~LDD_F_UPDATE;
        }
 
-        cfs_mutex_lock(&fsdb->fsdb_mutex);
+       mutex_lock(&fsdb->fsdb_mutex);
 
         if (mti->mti_flags &
             (LDD_F_VIRGIN | LDD_F_UPGRADE14 | LDD_F_WRITECONF)) {
@@ -3082,7 +3082,7 @@ int mgs_write_log_target(const struct lu_env *env,
         OBD_FREE(buf, strlen(mti->mti_params) + 1);
 
 out_up:
-        cfs_mutex_unlock(&fsdb->fsdb_mutex);
+       mutex_unlock(&fsdb->fsdb_mutex);
         RETURN(rc);
 }
 
@@ -3126,14 +3126,14 @@ int mgs_erase_logs(const struct lu_env *env, struct mgs_device *mgs, char *fsnam
        if (rc)
                RETURN(rc);
 
-        cfs_mutex_lock(&mgs->mgs_mutex);
+       mutex_lock(&mgs->mgs_mutex);
 
         /* Delete the fs db */
        fsdb = mgs_find_fsdb(mgs, fsname);
         if (fsdb)
                mgs_free_fsdb(mgs, fsdb);
 
-        cfs_mutex_unlock(&mgs->mgs_mutex);
+       mutex_unlock(&mgs->mgs_mutex);
 
        cfs_list_for_each_entry_safe(dirent, n, &list, list) {
                cfs_list_del(&dirent->list);
@@ -3230,8 +3230,8 @@ int mgs_setparam(const struct lu_env *env, struct mgs_device *mgs,
        rc = mgs_find_or_make_fsdb(env, mgs, fsname, &fsdb);
         if (rc)
                 RETURN(rc);
-        if (!cfs_test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags) &&
-            cfs_test_bit(FSDB_LOG_EMPTY, &fsdb->fsdb_flags)) {
+       if (!test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags) &&
+           test_bit(FSDB_LOG_EMPTY, &fsdb->fsdb_flags)) {
                 CERROR("No filesystem targets for %s.  cfg_device from lctl "
                        "is '%s'\n", fsname, devname);
                mgs_free_fsdb(mgs, fsdb);
@@ -3257,9 +3257,9 @@ int mgs_setparam(const struct lu_env *env, struct mgs_device *mgs,
 
         mti->mti_flags = rc | LDD_F_PARAM;
 
-        cfs_mutex_lock(&fsdb->fsdb_mutex);
+       mutex_lock(&fsdb->fsdb_mutex);
        rc = mgs_write_log_param(env, mgs, fsdb, mti, mti->mti_params);
-        cfs_mutex_unlock(&fsdb->fsdb_mutex);
+       mutex_unlock(&fsdb->fsdb_mutex);
 
         /*
          * Revoke lock so everyone updates.  Should be alright if
@@ -3316,7 +3316,7 @@ int mgs_pool_cmd(const struct lu_env *env, struct mgs_device *mgs,
                 CERROR("Can't get db for %s\n", fsname);
                 RETURN(rc);
         }
-        if (cfs_test_bit(FSDB_LOG_EMPTY, &fsdb->fsdb_flags)) {
+       if (test_bit(FSDB_LOG_EMPTY, &fsdb->fsdb_flags)) {
                 CERROR("%s is not defined\n", fsname);
                mgs_free_fsdb(mgs, fsdb);
                 RETURN(-EINVAL);
@@ -3370,7 +3370,7 @@ int mgs_pool_cmd(const struct lu_env *env, struct mgs_device *mgs,
                 break;
         }
 
-        cfs_mutex_lock(&fsdb->fsdb_mutex);
+       mutex_lock(&fsdb->fsdb_mutex);
 
         if (canceled_label != NULL) {
                 OBD_ALLOC_PTR(mti);
@@ -3380,11 +3380,11 @@ int mgs_pool_cmd(const struct lu_env *env, struct mgs_device *mgs,
 
         /* write pool def to all MDT logs */
         for (i = 0; i < INDEX_MAP_SIZE * 8; i++) {
-                 if (cfs_test_bit(i,  fsdb->fsdb_mdt_index_map)) {
+               if (test_bit(i,  fsdb->fsdb_mdt_index_map)) {
                        rc = name_create_mdt_and_lov(&logname, &lovname,
                                                     fsdb, i);
                        if (rc) {
-                               cfs_mutex_unlock(&fsdb->fsdb_mutex);
+                               mutex_unlock(&fsdb->fsdb_mutex);
                                GOTO(out_mti, rc);
                        }
                         if (canceled_label != NULL) {
@@ -3402,7 +3402,7 @@ int mgs_pool_cmd(const struct lu_env *env, struct mgs_device *mgs,
                         name_destroy(&logname);
                         name_destroy(&lovname);
                        if (rc) {
-                               cfs_mutex_unlock(&fsdb->fsdb_mutex);
+                               mutex_unlock(&fsdb->fsdb_mutex);
                                GOTO(out_mti, rc);
                        }
                 }
@@ -3410,14 +3410,14 @@ int mgs_pool_cmd(const struct lu_env *env, struct mgs_device *mgs,
 
        rc = name_create(&logname, fsname, "-client");
        if (rc) {
-               cfs_mutex_unlock(&fsdb->fsdb_mutex);
+               mutex_unlock(&fsdb->fsdb_mutex);
                GOTO(out_mti, rc);
        }
        if (canceled_label != NULL) {
                rc = mgs_modify(env, mgs, fsdb, mti, logname,
                                fsdb->fsdb_clilov, canceled_label, CM_SKIP);
                if (rc < 0) {
-                       cfs_mutex_unlock(&fsdb->fsdb_mutex);
+                       mutex_unlock(&fsdb->fsdb_mutex);
                        name_destroy(&logname);
                        GOTO(out_mti, rc);
                }
@@ -3425,7 +3425,7 @@ int mgs_pool_cmd(const struct lu_env *env, struct mgs_device *mgs,
 
        rc = mgs_write_log_pool(env, mgs, logname, fsdb, fsdb->fsdb_clilov,
                                cmd, fsname, poolname, ostname, label);
-        cfs_mutex_unlock(&fsdb->fsdb_mutex);
+       mutex_unlock(&fsdb->fsdb_mutex);
        name_destroy(&logname);
         /* request for update */
        mgs_revoke_lock(mgs, fsdb, CONFIG_T_CONFIG);
index e10c029..55a6dc9 100644 (file)
@@ -60,7 +60,7 @@ static int nidtbl_is_sane(struct mgs_nidtbl *tbl)
         struct mgs_nidtbl_target *tgt;
         int version = 0;
 
-        LASSERT(cfs_mutex_is_locked(&tbl->mn_lock));
+       LASSERT(mutex_is_locked(&tbl->mn_lock));
         cfs_list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
                 if (!tgt->mnt_version)
                         continue;
@@ -99,7 +99,7 @@ static int mgs_nidtbl_read(struct obd_export *exp, struct mgs_nidtbl *tbl,
         LASSERT((unit_size & (unit_size - 1)) == 0);
         LASSERT(nrpages << CFS_PAGE_SHIFT >= units_total * unit_size);
 
-        cfs_mutex_lock(&tbl->mn_lock);
+       mutex_lock(&tbl->mn_lock);
         LASSERT(nidtbl_is_sane(tbl));
 
         /* no more entries ? */
@@ -217,7 +217,7 @@ out:
         LASSERT(version <= tbl->mn_version);
         res->mcr_size = tbl->mn_version;
         res->mcr_offset = nobuf ? version : tbl->mn_version;
-        cfs_mutex_unlock(&tbl->mn_lock);
+       mutex_unlock(&tbl->mn_lock);
         LASSERT(ergo(version == 1, rc == 0)); /* get the log first time */
 
         CDEBUG(D_MGS, "Read IR logs %s return with %d, version %llu\n",
@@ -240,7 +240,7 @@ static int nidtbl_update_version(const struct lu_env *env,
        int               rc;
         ENTRY;
 
-       LASSERT(cfs_mutex_is_locked(&tbl->mn_lock));
+       LASSERT(mutex_is_locked(&tbl->mn_lock));
 
        fsdb = local_file_find_or_create(env, mgs->mgs_los, mgs->mgs_nidtbl_dir,
                                         tbl->mn_fsdb->fsdb_name,
@@ -288,7 +288,7 @@ static int nidtbl_read_version(const struct lu_env *env,
         int                  rc;
         ENTRY;
 
-        LASSERT(cfs_mutex_is_locked(&tbl->mn_lock));
+       LASSERT(mutex_is_locked(&tbl->mn_lock));
 
        LASSERT(mgs->mgs_nidtbl_dir);
        rc = dt_lookup_dir(env, mgs->mgs_nidtbl_dir, tbl->mn_fsdb->fsdb_name,
@@ -333,7 +333,7 @@ static int mgs_nidtbl_write(const struct lu_env *env, struct fs_db *fsdb,
         LASSERT(type != 0);
 
         tbl = &fsdb->fsdb_nidtbl;
-        cfs_mutex_lock(&tbl->mn_lock);
+       mutex_lock(&tbl->mn_lock);
         cfs_list_for_each_entry(tgt, &tbl->mn_targets, mnt_list) {
                 struct mgs_target_info *info = &tgt->mnt_mti;
                 if (type == tgt->mnt_type &&
@@ -364,7 +364,7 @@ static int mgs_nidtbl_write(const struct lu_env *env, struct fs_db *fsdb,
         EXIT;
 
 out:
-        cfs_mutex_unlock(&tbl->mn_lock);
+       mutex_unlock(&tbl->mn_lock);
         if (rc)
                 CERROR("Write NID table version for file system %s error %d\n",
                        fsdb->fsdb_name, rc);
@@ -376,10 +376,10 @@ static void mgs_nidtbl_fini_fs(struct fs_db *fsdb)
         struct mgs_nidtbl *tbl = &fsdb->fsdb_nidtbl;
         CFS_LIST_HEAD(head);
 
-        cfs_mutex_lock(&tbl->mn_lock);
+       mutex_lock(&tbl->mn_lock);
         tbl->mn_nr_targets = 0;
         cfs_list_splice_init(&tbl->mn_targets, &head);
-        cfs_mutex_unlock(&tbl->mn_lock);
+       mutex_unlock(&tbl->mn_lock);
 
         while (!cfs_list_empty(&head)) {
                 struct mgs_nidtbl_target *tgt;
@@ -395,12 +395,12 @@ static int mgs_nidtbl_init_fs(const struct lu_env *env, struct fs_db *fsdb)
        int rc;
 
         CFS_INIT_LIST_HEAD(&tbl->mn_targets);
-        cfs_mutex_init(&tbl->mn_lock);
+       mutex_init(&tbl->mn_lock);
         tbl->mn_nr_targets = 0;
         tbl->mn_fsdb = fsdb;
-        cfs_mutex_lock(&tbl->mn_lock);
+       mutex_lock(&tbl->mn_lock);
        rc = nidtbl_read_version(env, fsdb->fsdb_mgs, tbl, &tbl->mn_version);
-        cfs_mutex_unlock(&tbl->mn_lock);
+       mutex_unlock(&tbl->mn_lock);
        if (rc < 0)
                CERROR("%s: IR: failed to read current version, rc = %d\n",
                       fsdb->fsdb_mgs->mgs_obd->obd_name, rc);
@@ -442,7 +442,7 @@ static int mgs_ir_notify(void *arg)
         sprintf(name, "mgs_%s_notify", fsdb->fsdb_name);
         cfs_daemonize(name);
 
-        cfs_complete(&fsdb->fsdb_notify_comp);
+       complete(&fsdb->fsdb_notify_comp);
 
         set_user_nice(current, -2);
 
@@ -464,7 +464,7 @@ static int mgs_ir_notify(void *arg)
                mgs_revoke_lock(fsdb->fsdb_mgs, fsdb, CONFIG_T_RECOVER);
         }
 
-        cfs_complete(&fsdb->fsdb_notify_comp);
+       complete(&fsdb->fsdb_notify_comp);
         return 0;
 }
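
mgs_ir_notify() completes fsdb_notify_comp twice: once when the daemon is up, so mgs_ir_init_fs() below can return, and once on exit, so mgs_ir_fini_fs() can reap it. Completions count, so one object serves both handshakes as long as each complete() is matched by a wait_for_completion(). A sketch using kthread_run() as a stand-in for cfs_create_thread(); all names are hypothetical:

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static struct completion notify_comp;
static int stop_requested;

static int notify_thread(void *arg)
{
        complete(&notify_comp);         /* handshake 1: we are running */
        while (!stop_requested)
                schedule_timeout_interruptible(HZ);
        complete(&notify_comp);         /* handshake 2: we are exiting */
        return 0;
}

static int notify_start(void)
{
        struct task_struct *task;

        init_completion(&notify_comp);
        task = kthread_run(notify_thread, NULL, "demo_notify");
        if (IS_ERR(task))
                return PTR_ERR(task);
        wait_for_completion(&notify_comp);      /* wait for handshake 1 */
        return 0;
}

static void notify_stop(void)
{
        stop_requested = 1;
        wait_for_completion(&notify_comp);      /* wait for handshake 2 */
}
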
 
@@ -487,10 +487,10 @@ int mgs_ir_init_fs(const struct lu_env *env, struct mgs_device *mgs,
        fsdb->fsdb_mgs = mgs;
         cfs_atomic_set(&fsdb->fsdb_notify_phase, 0);
         cfs_waitq_init(&fsdb->fsdb_notify_waitq);
-        cfs_init_completion(&fsdb->fsdb_notify_comp);
+       init_completion(&fsdb->fsdb_notify_comp);
         rc = cfs_create_thread(mgs_ir_notify, fsdb, CFS_DAEMON_FLAGS);
         if (rc > 0)
-                cfs_wait_for_completion(&fsdb->fsdb_notify_comp);
+               wait_for_completion(&fsdb->fsdb_notify_comp);
         else
                 CERROR("Start notify thread error %d\n", rc);
 
@@ -500,7 +500,7 @@ int mgs_ir_init_fs(const struct lu_env *env, struct mgs_device *mgs,
 
 void mgs_ir_fini_fs(struct mgs_device *mgs, struct fs_db *fsdb)
 {
-        if (cfs_test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags))
+       if (test_bit(FSDB_MGS_SELF, &fsdb->fsdb_flags))
                 return;
 
         mgs_fsc_cleanup_by_fsdb(fsdb);
@@ -511,7 +511,7 @@ void mgs_ir_fini_fs(struct mgs_device *mgs, struct fs_db *fsdb)
 
         fsdb->fsdb_notify_stop = 1;
         cfs_waitq_signal(&fsdb->fsdb_notify_waitq);
-        cfs_wait_for_completion(&fsdb->fsdb_notify_comp);
+       wait_for_completion(&fsdb->fsdb_notify_comp);
 }
 
 /* caller must have held fsdb_mutex */
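
Contracts like the comment above pair naturally with the mutex_is_locked() assertions this series introduces in mgs_nidtbl.c: document that the caller holds the lock, then check it cheaply at runtime. Note that mutex_is_locked() only proves the mutex is held by someone, not necessarily the caller, which is all these debug asserts settle for. A tiny sketch with hypothetical names:

#include <linux/bug.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(tbl_mutex);

/* Must be called with tbl_mutex held; the WARN documents and checks
 * the contract, like LASSERT(mutex_is_locked(...)) above. */
static void tbl_update_locked(void)
{
        WARN_ON(!mutex_is_locked(&tbl_mutex));
        /* ... modify the table ... */
}
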
@@ -546,7 +546,7 @@ int mgs_ir_update(const struct lu_env *env, struct mgs_device *mgs,
                 return rc;
 
         /* check ir state */
-        cfs_mutex_lock(&fsdb->fsdb_mutex);
+       mutex_lock(&fsdb->fsdb_mutex);
         ir_state_graduate(fsdb);
         switch (fsdb->fsdb_ir_state) {
         case IR_FULL:
@@ -560,7 +560,7 @@ int mgs_ir_update(const struct lu_env *env, struct mgs_device *mgs,
         default:
                 LBUG();
         }
-        cfs_mutex_unlock(&fsdb->fsdb_mutex);
+       mutex_unlock(&fsdb->fsdb_mutex);
 
         LASSERT(ergo(mti->mti_flags & LDD_F_IR_CAPABLE, notify));
         if (notify) {
@@ -716,11 +716,11 @@ static int lprocfs_ir_set_state(struct fs_db *fsdb, const char *buf)
 
         CDEBUG(D_MGS, "change fsr state of %s from %s to %s\n",
                fsdb->fsdb_name, strings[fsdb->fsdb_ir_state], strings[state]);
-        cfs_mutex_lock(&fsdb->fsdb_mutex);
+       mutex_lock(&fsdb->fsdb_mutex);
         if (state == IR_FULL && fsdb->fsdb_nonir_clients)
                 state = IR_PARTIAL;
         fsdb->fsdb_ir_state = state;
-        cfs_mutex_unlock(&fsdb->fsdb_mutex);
+       mutex_unlock(&fsdb->fsdb_mutex);
 
         return 0;
 }
@@ -885,10 +885,10 @@ int mgs_fsc_attach(const struct lu_env *env, struct obd_export *exp,
                         !!(exp->exp_connect_flags & OBD_CONNECT_IMP_RECOV);
 
         rc = -EEXIST;
-        cfs_mutex_lock(&fsdb->fsdb_mutex);
+       mutex_lock(&fsdb->fsdb_mutex);
 
-        /* tend to find it in export list because this list is shorter. */
-        cfs_spin_lock(&data->med_lock);
+       /* tend to find it in export list because this list is shorter. */
+       spin_lock(&data->med_lock);
         cfs_list_for_each_entry(fsc, &data->med_clients, mfc_export_list) {
                 if (strcmp(fsname, fsc->mfc_fsdb->fsdb_name) == 0) {
                         found = true;
@@ -911,8 +911,8 @@ int mgs_fsc_attach(const struct lu_env *env, struct obd_export *exp,
                 }
                 rc = 0;
         }
-        cfs_spin_unlock(&data->med_lock);
-        cfs_mutex_unlock(&fsdb->fsdb_mutex);
+       spin_unlock(&data->med_lock);
+       mutex_unlock(&fsdb->fsdb_mutex);
 
         if (new_fsc) {
                 class_export_put(new_fsc->mfc_export);
@@ -923,33 +923,33 @@ int mgs_fsc_attach(const struct lu_env *env, struct obd_export *exp,
 
 void mgs_fsc_cleanup(struct obd_export *exp)
 {
-        struct mgs_export_data *data = &exp->u.eu_mgs_data;
-        struct mgs_fsc *fsc, *tmp;
-        CFS_LIST_HEAD(head);
-
-        cfs_spin_lock(&data->med_lock);
-        cfs_list_splice_init(&data->med_clients, &head);
-        cfs_spin_unlock(&data->med_lock);
-
-        cfs_list_for_each_entry_safe(fsc, tmp, &head, mfc_export_list) {
-                struct fs_db *fsdb = fsc->mfc_fsdb;
-
-                LASSERT(fsc->mfc_export == exp);
-
-                cfs_mutex_lock(&fsdb->fsdb_mutex);
-                cfs_list_del_init(&fsc->mfc_fsdb_list);
-                if (fsc->mfc_ir_capable == 0) {
-                        --fsdb->fsdb_nonir_clients;
-                        LASSERT(fsdb->fsdb_ir_state != IR_FULL);
-                        if (fsdb->fsdb_nonir_clients == 0 &&
-                            fsdb->fsdb_ir_state == IR_PARTIAL)
-                                fsdb->fsdb_ir_state = IR_FULL;
-                }
-                cfs_mutex_unlock(&fsdb->fsdb_mutex);
-                cfs_list_del_init(&fsc->mfc_export_list);
-                class_export_put(fsc->mfc_export);
-                OBD_FREE_PTR(fsc);
-        }
+       struct mgs_export_data *data = &exp->u.eu_mgs_data;
+       struct mgs_fsc *fsc, *tmp;
+       CFS_LIST_HEAD(head);
+
+       spin_lock(&data->med_lock);
+       cfs_list_splice_init(&data->med_clients, &head);
+       spin_unlock(&data->med_lock);
+
+       cfs_list_for_each_entry_safe(fsc, tmp, &head, mfc_export_list) {
+               struct fs_db *fsdb = fsc->mfc_fsdb;
+
+               LASSERT(fsc->mfc_export == exp);
+
+               mutex_lock(&fsdb->fsdb_mutex);
+               cfs_list_del_init(&fsc->mfc_fsdb_list);
+               if (fsc->mfc_ir_capable == 0) {
+                       --fsdb->fsdb_nonir_clients;
+                       LASSERT(fsdb->fsdb_ir_state != IR_FULL);
+                       if (fsdb->fsdb_nonir_clients == 0 &&
+                           fsdb->fsdb_ir_state == IR_PARTIAL)
+                               fsdb->fsdb_ir_state = IR_FULL;
+               }
+               mutex_unlock(&fsdb->fsdb_mutex);
+               cfs_list_del_init(&fsc->mfc_export_list);
+               class_export_put(fsc->mfc_export);
+               OBD_FREE_PTR(fsc);
+       }
 }
 
 /* must be called with fsdb->fsdb_mutex held */
@@ -964,9 +964,9 @@ void mgs_fsc_cleanup_by_fsdb(struct fs_db *fsdb)
                 LASSERT(fsdb == fsc->mfc_fsdb);
                 cfs_list_del_init(&fsc->mfc_fsdb_list);
 
-                cfs_spin_lock(&data->med_lock);
-                cfs_list_del_init(&fsc->mfc_export_list);
-                cfs_spin_unlock(&data->med_lock);
+               spin_lock(&data->med_lock);
+               cfs_list_del_init(&fsc->mfc_export_list);
+               spin_unlock(&data->med_lock);
                 class_export_put(fsc->mfc_export);
                 OBD_FREE_PTR(fsc);
         }
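
The mgs_ir hunks above move the notify thread's startup and shutdown handshake onto the stock kernel completion API. For reference, a minimal sketch of that handshake using plain kernel primitives; every identifier below is hypothetical, and kthread_run() merely stands in for cfs_create_thread(), which this patch leaves unconverted:

    #include <linux/completion.h>
    #include <linux/err.h>
    #include <linux/kthread.h>

    static struct completion ready;

    static int worker(void *arg)
    {
            complete(&ready);       /* handshake: creator may proceed */
            /* ... serve requests until asked to stop ... */
            complete(&ready);       /* handshake: stopper may proceed */
            return 0;
    }

    static int start_worker(void)
    {
            struct task_struct *task;

            init_completion(&ready);
            task = kthread_run(worker, NULL, "demo_notify");
            if (IS_ERR(task))
                    return PTR_ERR(task);
            wait_for_completion(&ready);    /* block until worker is up */
            return 0;
    }

Each complete() increments the completion's done count and each wait_for_completion() consumes one, so a single struct completion can cover both the startup and the exit handshake, as fsdb_notify_comp does above.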
index 4fc3fff..35318de 100644
@@ -117,19 +117,19 @@ static inline void capa_delete(struct obd_capa *ocapa)
 
 void cleanup_capa_hash(cfs_hlist_head_t *hash)
 {
-        int i;
-        cfs_hlist_node_t *pos, *next;
-        struct obd_capa *oc;
-
-        cfs_spin_lock(&capa_lock);
-        for (i = 0; i < NR_CAPAHASH; i++) {
-                cfs_hlist_for_each_entry_safe(oc, pos, next, hash + i,
-                                              u.tgt.c_hash)
-                        capa_delete(oc);
-        }
-        cfs_spin_unlock(&capa_lock);
+       int i;
+       cfs_hlist_node_t *pos, *next;
+       struct obd_capa *oc;
+
+       spin_lock(&capa_lock);
+       for (i = 0; i < NR_CAPAHASH; i++) {
+               cfs_hlist_for_each_entry_safe(oc, pos, next, hash + i,
+                                             u.tgt.c_hash)
+                       capa_delete(oc);
+       }
+       spin_unlock(&capa_lock);
 
-        OBD_FREE(hash, CFS_PAGE_SIZE);
+       OBD_FREE(hash, CFS_PAGE_SIZE);
 }
 EXPORT_SYMBOL(cleanup_capa_hash);
 
@@ -201,7 +201,7 @@ struct obd_capa *capa_add(cfs_hlist_head_t *hash, struct lustre_capa *capa)
         if (IS_ERR(ocapa))
                 return NULL;
 
-        cfs_spin_lock(&capa_lock);
+       spin_lock(&capa_lock);
         old = find_capa(capa, head, 0);
         if (!old) {
                 ocapa->c_capa = *capa;
@@ -212,32 +212,32 @@ struct obd_capa *capa_add(cfs_hlist_head_t *hash, struct lustre_capa *capa)
                 capa_count[CAPA_SITE_SERVER]++;
                 if (capa_count[CAPA_SITE_SERVER] > CAPA_HASH_SIZE)
                         capa_delete_lru(list);
-                cfs_spin_unlock(&capa_lock);
-                return ocapa;
-        } else {
-                capa_get(old);
-                cfs_spin_unlock(&capa_lock);
-                capa_put(ocapa);
-                return old;
-        }
+               spin_unlock(&capa_lock);
+               return ocapa;
+       } else {
+               capa_get(old);
+               spin_unlock(&capa_lock);
+               capa_put(ocapa);
+               return old;
+       }
 }
 EXPORT_SYMBOL(capa_add);
 
 struct obd_capa *capa_lookup(cfs_hlist_head_t *hash, struct lustre_capa *capa,
-                             int alive)
+                            int alive)
 {
-        struct obd_capa *ocapa;
-
-        cfs_spin_lock(&capa_lock);
-        ocapa = find_capa(capa, hash + capa_hashfn(&capa->lc_fid), alive);
-        if (ocapa) {
-                cfs_list_move_tail(&ocapa->c_list,
-                                   &capa_list[CAPA_SITE_SERVER]);
-                capa_get(ocapa);
-        }
-        cfs_spin_unlock(&capa_lock);
+       struct obd_capa *ocapa;
+
+       spin_lock(&capa_lock);
+       ocapa = find_capa(capa, hash + capa_hashfn(&capa->lc_fid), alive);
+       if (ocapa) {
+               cfs_list_move_tail(&ocapa->c_list,
+                                  &capa_list[CAPA_SITE_SERVER]);
+               capa_get(ocapa);
+       }
+       spin_unlock(&capa_lock);
 
-        return ocapa;
+       return ocapa;
 }
 EXPORT_SYMBOL(capa_lookup);
 
@@ -384,9 +384,9 @@ EXPORT_SYMBOL(capa_decrypt_id);
 
 void capa_cpy(void *capa, struct obd_capa *ocapa)
 {
-        cfs_spin_lock(&ocapa->c_lock);
-        *(struct lustre_capa *)capa = ocapa->c_capa;
-        cfs_spin_unlock(&ocapa->c_lock);
+       spin_lock(&ocapa->c_lock);
+       *(struct lustre_capa *)capa = ocapa->c_capa;
+       spin_unlock(&ocapa->c_lock);
 }
 EXPORT_SYMBOL(capa_cpy);
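
capa_cpy() above is the classic copy-under-spinlock idiom: holding the lock makes the whole-struct assignment appear atomic to concurrent updaters. A minimal sketch of the same idiom, with hypothetical names:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct capa_like { u64 expiry; u32 flags; };    /* stand-in structure */

    static DEFINE_SPINLOCK(copy_lock);
    static struct capa_like shared;

    static void copy_out(struct capa_like *dst)
    {
            spin_lock(&copy_lock);
            *dst = shared;  /* snapshot is consistent under the lock */
            spin_unlock(&copy_lock);
    }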
 
index 5bae736..36f969e 100644
@@ -1106,9 +1106,9 @@ void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
         LASSERT(page->cp_owner != NULL);
         LINVRNT(plist->pl_owner == cfs_current());
 
-        cfs_lockdep_off();
-        cfs_mutex_lock(&page->cp_mutex);
-        cfs_lockdep_on();
+       lockdep_off();
+       mutex_lock(&page->cp_mutex);
+       lockdep_on();
         LASSERT(cfs_list_empty(&page->cp_batch));
         cfs_list_add_tail(&page->cp_batch, &plist->pl_pages);
         ++plist->pl_nr;
@@ -1129,9 +1129,9 @@ void cl_page_list_del(const struct lu_env *env,
 
         ENTRY;
         cfs_list_del_init(&page->cp_batch);
-        cfs_lockdep_off();
-        cfs_mutex_unlock(&page->cp_mutex);
-        cfs_lockdep_on();
+       lockdep_off();
+       mutex_unlock(&page->cp_mutex);
+       lockdep_on();
         --plist->pl_nr;
         lu_ref_del_at(&page->cp_reference, page->cp_queue_ref, "queue", plist);
         cl_page_put(env, page);
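
The cl_page_list hunks keep the lockdep_off()/lockdep_on() bracket around the cp_mutex operations: the mutex is taken in the add path and released in the del or disown path, an ordering lockdep cannot model, so checking is muted just for those calls. A hypothetical sketch of the same bracket:

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(batch_mutex);       /* stand-in for page->cp_mutex */

    static void batch_add(void)
    {
            lockdep_off();                  /* disable checking for this task */
            mutex_lock(&batch_mutex);
            lockdep_on();                   /* re-enable immediately after */
            /* ... link the item into the batch list ... */
    }

    static void batch_del(void)
    {
            /* ... unlink the item ... */
            lockdep_off();
            mutex_unlock(&batch_mutex);     /* released in a different function */
            lockdep_on();
    }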
@@ -1196,9 +1196,9 @@ void cl_page_list_disown(const struct lu_env *env,
                 LASSERT(plist->pl_nr > 0);
 
                 cfs_list_del_init(&page->cp_batch);
-                cfs_lockdep_off();
-                cfs_mutex_unlock(&page->cp_mutex);
-                cfs_lockdep_on();
+               lockdep_off();
+               mutex_unlock(&page->cp_mutex);
+               lockdep_on();
                 --plist->pl_nr;
                 /*
                  * cl_page_disown0 rather than usual cl_page_disown() is used,
index 5dcccbb..5a9ee70 100644
@@ -51,7 +51,7 @@
 #include "cl_internal.h"
 
 /** Lock class of cl_lock::cll_guard */
-static cfs_lock_class_key_t cl_lock_guard_class;
+static struct lock_class_key cl_lock_guard_class;
 static cfs_mem_cache_t *cl_lock_kmem;
 
 static struct lu_kmem_descr cl_lock_caches[] = {
@@ -142,7 +142,7 @@ static void cl_lock_trace0(int level, const struct lu_env *env,
 #define RETIP ((unsigned long)__builtin_return_address(0))
 
 #ifdef CONFIG_LOCKDEP
-static cfs_lock_class_key_t cl_lock_key;
+static struct lock_class_key cl_lock_key;
 
 static void cl_lock_lockdep_init(struct cl_lock *lock)
 {
@@ -275,7 +275,7 @@ static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
         cl_object_put(env, obj);
         lu_ref_fini(&lock->cll_reference);
         lu_ref_fini(&lock->cll_holders);
-        cfs_mutex_destroy(&lock->cll_guard);
+       mutex_destroy(&lock->cll_guard);
         OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
         EXIT;
 }
@@ -389,8 +389,8 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
                 CFS_INIT_LIST_HEAD(&lock->cll_inclosure);
                 lu_ref_init(&lock->cll_reference);
                 lu_ref_init(&lock->cll_holders);
-                cfs_mutex_init(&lock->cll_guard);
-                cfs_lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
+               mutex_init(&lock->cll_guard);
+               lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
                 cfs_waitq_init(&lock->cll_wq);
                 head = obj->co_lu.lo_header;
                 cfs_atomic_inc(&site->cs_locks_state[CLS_NEW]);
@@ -546,24 +546,24 @@ static struct cl_lock *cl_lock_find(const struct lu_env *env,
         head = cl_object_header(obj);
         site = cl_object_site(obj);
 
-        cfs_spin_lock(&head->coh_lock_guard);
-        lock = cl_lock_lookup(env, obj, io, need);
-        cfs_spin_unlock(&head->coh_lock_guard);
-
-        if (lock == NULL) {
-                lock = cl_lock_alloc(env, obj, io, need);
-                if (!IS_ERR(lock)) {
-                        struct cl_lock *ghost;
-
-                        cfs_spin_lock(&head->coh_lock_guard);
-                        ghost = cl_lock_lookup(env, obj, io, need);
-                        if (ghost == NULL) {
-                                cfs_list_add_tail(&lock->cll_linkage,
-                                                  &head->coh_locks);
-                                cfs_spin_unlock(&head->coh_lock_guard);
-                                cfs_atomic_inc(&site->cs_locks.cs_busy);
-                        } else {
-                                cfs_spin_unlock(&head->coh_lock_guard);
+       spin_lock(&head->coh_lock_guard);
+       lock = cl_lock_lookup(env, obj, io, need);
+       spin_unlock(&head->coh_lock_guard);
+
+       if (lock == NULL) {
+               lock = cl_lock_alloc(env, obj, io, need);
+               if (!IS_ERR(lock)) {
+                       struct cl_lock *ghost;
+
+                       spin_lock(&head->coh_lock_guard);
+                       ghost = cl_lock_lookup(env, obj, io, need);
+                       if (ghost == NULL) {
+                               cfs_list_add_tail(&lock->cll_linkage,
+                                                 &head->coh_locks);
+                               spin_unlock(&head->coh_lock_guard);
+                               cfs_atomic_inc(&site->cs_locks.cs_busy);
+                       } else {
+                               spin_unlock(&head->coh_lock_guard);
                                 /*
                                  * Other threads can acquire references to the
                                  * top-lock through its sub-locks. Hence, it
@@ -594,9 +594,9 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
         head = cl_object_header(obj);
 
        do {
-               cfs_spin_lock(&head->coh_lock_guard);
+               spin_lock(&head->coh_lock_guard);
                lock = cl_lock_lookup(env, obj, io, need);
-               cfs_spin_unlock(&head->coh_lock_guard);
+               spin_unlock(&head->coh_lock_guard);
                if (lock == NULL)
                        return NULL;
 
@@ -694,7 +694,7 @@ void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
                 info = cl_env_info(env);
                 for (i = 0; i < hdr->coh_nesting; ++i)
                         LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
-                cfs_mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
+               mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
                 lock->cll_guarder = cfs_current();
                 LINVRNT(lock->cll_depth == 0);
         }
@@ -724,7 +724,7 @@ int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
         if (lock->cll_guarder == cfs_current()) {
                 LINVRNT(lock->cll_depth > 0);
                 cl_lock_mutex_tail(env, lock);
-        } else if (cfs_mutex_trylock(&lock->cll_guard)) {
+       } else if (mutex_trylock(&lock->cll_guard)) {
                 LINVRNT(lock->cll_depth == 0);
                 lock->cll_guarder = cfs_current();
                 cl_lock_mutex_tail(env, lock);
@@ -758,7 +758,7 @@ void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
         counters->ctc_nr_locks_locked--;
         if (--lock->cll_depth == 0) {
                 lock->cll_guarder = NULL;
-                cfs_mutex_unlock(&lock->cll_guard);
+               mutex_unlock(&lock->cll_guard);
         }
 }
 EXPORT_SYMBOL(cl_lock_mutex_put);
@@ -826,9 +826,9 @@ static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
 
                 head = cl_object_header(lock->cll_descr.cld_obj);
 
-                cfs_spin_lock(&head->coh_lock_guard);
-                cfs_list_del_init(&lock->cll_linkage);
-                cfs_spin_unlock(&head->coh_lock_guard);
+               spin_lock(&head->coh_lock_guard);
+               cfs_list_del_init(&lock->cll_linkage);
+               spin_unlock(&head->coh_lock_guard);
 
                 /*
                  * From now on, no new references to this lock can be acquired
@@ -1613,10 +1613,10 @@ int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
          * now. If locks were indexed according to their extent and/or mode,
          * that index would have to be updated here.
          */
-        cfs_spin_lock(&hdr->coh_lock_guard);
-        lock->cll_descr = *desc;
-        cfs_spin_unlock(&hdr->coh_lock_guard);
-        RETURN(0);
+       spin_lock(&hdr->coh_lock_guard);
+       lock->cll_descr = *desc;
+       spin_unlock(&hdr->coh_lock_guard);
+       RETURN(0);
 }
 EXPORT_SYMBOL(cl_lock_modify);
 
@@ -1867,7 +1867,7 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
        need->cld_start = need->cld_end = index;
         need->cld_enq_flags = 0;
 
-        cfs_spin_lock(&head->coh_lock_guard);
+       spin_lock(&head->coh_lock_guard);
         /* It is fine to match any group lock since there could be only one
          * with a uniq gid and it conflicts with all other lock modes too */
         cfs_list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
@@ -1890,8 +1890,8 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
                         break;
                 }
         }
-        cfs_spin_unlock(&head->coh_lock_guard);
-        RETURN(lock);
+       spin_unlock(&head->coh_lock_guard);
+       RETURN(lock);
 }
 EXPORT_SYMBOL(cl_lock_at_pgoff);
 
@@ -2040,12 +2040,12 @@ void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
         LASSERT(ergo(!cancel,
                      head->coh_tree.rnode == NULL && head->coh_pages == 0));
 
-        cfs_spin_lock(&head->coh_lock_guard);
-        while (!cfs_list_empty(&head->coh_locks)) {
-                lock = container_of(head->coh_locks.next,
-                                    struct cl_lock, cll_linkage);
-                cl_lock_get_trust(lock);
-                cfs_spin_unlock(&head->coh_lock_guard);
+       spin_lock(&head->coh_lock_guard);
+       while (!cfs_list_empty(&head->coh_locks)) {
+               lock = container_of(head->coh_locks.next,
+                                   struct cl_lock, cll_linkage);
+               cl_lock_get_trust(lock);
+               spin_unlock(&head->coh_lock_guard);
                 lu_ref_add(&lock->cll_reference, "prune", cfs_current());
 
 again:
@@ -2069,10 +2069,10 @@ again:
                 cl_lock_mutex_put(env, lock);
                 lu_ref_del(&lock->cll_reference, "prune", cfs_current());
                 cl_lock_put(env, lock);
-                cfs_spin_lock(&head->coh_lock_guard);
-        }
-        cfs_spin_unlock(&head->coh_lock_guard);
-        EXIT;
+               spin_lock(&head->coh_lock_guard);
+       }
+       spin_unlock(&head->coh_lock_guard);
+       EXIT;
 }
 EXPORT_SYMBOL(cl_locks_prune);
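
cl_lock_mutex_get() above passes hdr->coh_nesting to mutex_lock_nested() so that parent and child cll_guard mutexes are tracked under distinct lockdep subclasses instead of triggering a false self-deadlock report. A sketch of per-level subclasses on a hypothetical hierarchy:

    #include <linux/mutex.h>

    struct level {                  /* hypothetical nested object */
            struct mutex guard;
            struct level *child;
    };

    static void lock_hierarchy(struct level *top)
    {
            struct level *l;
            unsigned int depth = 0;

            /* one lockdep subclass per nesting level; lockdep supports
             * only a small number of subclasses, so keep hierarchies
             * shallow; unlock in the reverse order */
            for (l = top; l != NULL; l = l->child)
                    mutex_lock_nested(&l->guard, depth++);
    }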
 
index 4b0e303..248ccce 100644
 static cfs_mem_cache_t *cl_env_kmem;
 
 /** Lock class of cl_object_header::coh_page_guard */
-static cfs_lock_class_key_t cl_page_guard_class;
+static struct lock_class_key cl_page_guard_class;
 /** Lock class of cl_object_header::coh_lock_guard */
-static cfs_lock_class_key_t cl_lock_guard_class;
+static struct lock_class_key cl_lock_guard_class;
 /** Lock class of cl_object_header::coh_attr_guard */
-static cfs_lock_class_key_t cl_attr_guard_class;
+static struct lock_class_key cl_attr_guard_class;
 
 extern __u32 lu_context_tags_default;
 extern __u32 lu_session_tags_default;
@@ -85,12 +85,12 @@ int cl_object_header_init(struct cl_object_header *h)
         ENTRY;
         result = lu_object_header_init(&h->coh_lu);
         if (result == 0) {
-                cfs_spin_lock_init(&h->coh_page_guard);
-                cfs_spin_lock_init(&h->coh_lock_guard);
-                cfs_spin_lock_init(&h->coh_attr_guard);
-                cfs_lockdep_set_class(&h->coh_page_guard, &cl_page_guard_class);
-                cfs_lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
-                cfs_lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
+               spin_lock_init(&h->coh_page_guard);
+               spin_lock_init(&h->coh_lock_guard);
+               spin_lock_init(&h->coh_attr_guard);
+               lockdep_set_class(&h->coh_page_guard, &cl_page_guard_class);
+               lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
+               lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
                 h->coh_pages = 0;
                 /* XXX hard coded GFP_* mask. */
                 INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
@@ -183,9 +183,9 @@ EXPORT_SYMBOL(cl_object_top);
  *
  * \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get().
  */
-static cfs_spinlock_t *cl_object_attr_guard(struct cl_object *o)
+static spinlock_t *cl_object_attr_guard(struct cl_object *o)
 {
-        return &cl_object_header(cl_object_top(o))->coh_attr_guard;
+       return &cl_object_header(cl_object_top(o))->coh_attr_guard;
 }
 
 /**
@@ -197,7 +197,7 @@ static cfs_spinlock_t *cl_object_attr_guard(struct cl_object *o)
  */
 void cl_object_attr_lock(struct cl_object *o)
 {
-        cfs_spin_lock(cl_object_attr_guard(o));
+       spin_lock(cl_object_attr_guard(o));
 }
 EXPORT_SYMBOL(cl_object_attr_lock);
 
@@ -206,7 +206,7 @@ EXPORT_SYMBOL(cl_object_attr_lock);
  */
 void cl_object_attr_unlock(struct cl_object *o)
 {
-        cfs_spin_unlock(cl_object_attr_guard(o));
+       spin_unlock(cl_object_attr_guard(o));
 }
 EXPORT_SYMBOL(cl_object_attr_unlock);
 
@@ -347,7 +347,7 @@ void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
         LASSERT(hdr->coh_tree.rnode == NULL);
         LASSERT(hdr->coh_pages == 0);
 
-        cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
+       set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
         /*
          * Destroy all locks. Object destruction (including cl_inode_fini())
          * cannot cancel the locks, because in the case of a local client,
@@ -376,14 +376,14 @@ EXPORT_SYMBOL(cl_object_prune);
  */
 int cl_object_has_locks(struct cl_object *obj)
 {
-        struct cl_object_header *head = cl_object_header(obj);
-        int has;
+       struct cl_object_header *head = cl_object_header(obj);
+       int has;
 
-        cfs_spin_lock(&head->coh_lock_guard);
-        has = cfs_list_empty(&head->coh_locks);
-        cfs_spin_unlock(&head->coh_lock_guard);
+       spin_lock(&head->coh_lock_guard);
+       has = cfs_list_empty(&head->coh_locks);
+       spin_unlock(&head->coh_lock_guard);
 
-        return (has == 0);
+       return (has == 0);
 }
 EXPORT_SYMBOL(cl_object_has_locks);
 
@@ -794,19 +794,19 @@ static void cl_env_fini(struct cl_env *cle)
 
 static struct lu_env *cl_env_obtain(void *debug)
 {
-        struct cl_env *cle;
-        struct lu_env *env;
+       struct cl_env *cle;
+       struct lu_env *env;
 
-        ENTRY;
-        cfs_spin_lock(&cl_envs_guard);
-        LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
-        if (cl_envs_cached_nr > 0) {
-                int rc;
+       ENTRY;
+       spin_lock(&cl_envs_guard);
+       LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
+       if (cl_envs_cached_nr > 0) {
+               int rc;
 
-                cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
-                cfs_list_del_init(&cle->ce_linkage);
-                cl_envs_cached_nr--;
-                cfs_spin_unlock(&cl_envs_guard);
+               cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
+               cfs_list_del_init(&cle->ce_linkage);
+               cl_envs_cached_nr--;
+               spin_unlock(&cl_envs_guard);
 
                 env = &cle->ce_lu;
                 rc = lu_env_refill(env);
@@ -819,11 +819,11 @@ static struct lu_env *cl_env_obtain(void *debug)
                         env = ERR_PTR(rc);
                 }
         } else {
-                cfs_spin_unlock(&cl_envs_guard);
-                env = cl_env_new(lu_context_tags_default,
-                                 lu_session_tags_default, debug);
-        }
-        RETURN(env);
+               spin_unlock(&cl_envs_guard);
+               env = cl_env_new(lu_context_tags_default,
+                                lu_session_tags_default, debug);
+       }
+       RETURN(env);
 }
 
 static inline struct cl_env *cl_env_container(struct lu_env *env)
@@ -922,23 +922,23 @@ static void cl_env_exit(struct cl_env *cle)
  */
 unsigned cl_env_cache_purge(unsigned nr)
 {
-        struct cl_env *cle;
-
-        ENTRY;
-        cfs_spin_lock(&cl_envs_guard);
-        for (; !cfs_list_empty(&cl_envs) && nr > 0; --nr) {
-                cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
-                cfs_list_del_init(&cle->ce_linkage);
-                LASSERT(cl_envs_cached_nr > 0);
-                cl_envs_cached_nr--;
-                cfs_spin_unlock(&cl_envs_guard);
-
-                cl_env_fini(cle);
-                cfs_spin_lock(&cl_envs_guard);
-        }
-        LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
-        cfs_spin_unlock(&cl_envs_guard);
-        RETURN(nr);
+       struct cl_env *cle;
+
+       ENTRY;
+       spin_lock(&cl_envs_guard);
+       for (; !cfs_list_empty(&cl_envs) && nr > 0; --nr) {
+               cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
+               cfs_list_del_init(&cle->ce_linkage);
+               LASSERT(cl_envs_cached_nr > 0);
+               cl_envs_cached_nr--;
+               spin_unlock(&cl_envs_guard);
+
+               cl_env_fini(cle);
+               spin_lock(&cl_envs_guard);
+       }
+       LASSERT(equi(cl_envs_cached_nr == 0, cfs_list_empty(&cl_envs)));
+       spin_unlock(&cl_envs_guard);
+       RETURN(nr);
 }
 EXPORT_SYMBOL(cl_env_cache_purge);
 
@@ -973,13 +973,13 @@ void cl_env_put(struct lu_env *env, int *refcheck)
                 if (cl_envs_cached_nr < cl_envs_cached_max &&
                     (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
                     (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
-                        cfs_spin_lock(&cl_envs_guard);
-                        cfs_list_add(&cle->ce_linkage, &cl_envs);
-                        cl_envs_cached_nr++;
-                        cfs_spin_unlock(&cl_envs_guard);
-                } else
-                        cl_env_fini(cle);
-        }
+                       spin_lock(&cl_envs_guard);
+                       cfs_list_add(&cle->ce_linkage, &cl_envs);
+                       cl_envs_cached_nr++;
+                       spin_unlock(&cl_envs_guard);
+               } else
+                       cl_env_fini(cle);
+       }
 }
 EXPORT_SYMBOL(cl_env_put);
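
cl_object_header_init() above gives each dynamically initialized guard a shared, statically allocated struct lock_class_key, so lockdep reports one class per kind of guard rather than one class per object instance. A minimal sketch with hypothetical names:

    #include <linux/lockdep.h>
    #include <linux/spinlock.h>

    /* one static key shared by every instance of this lock "class" */
    static struct lock_class_key demo_guard_class;

    struct demo_header { spinlock_t guard; };

    static void demo_header_init(struct demo_header *h)
    {
            spin_lock_init(&h->guard);
            lockdep_set_class(&h->guard, &demo_guard_class);
    }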
 
index ba12d8a..9dbdf24 100644
@@ -203,7 +203,7 @@ int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
         hdr = cl_object_header(obj);
         pvec = cl_env_info(env)->clt_pvec;
         dtype = cl_object_top(obj)->co_lu.lo_dev->ld_type;
-        cfs_spin_lock(&hdr->coh_page_guard);
+       spin_lock(&hdr->coh_page_guard);
         while ((nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
                                             idx, CLT_PVEC_SIZE)) > 0) {
                 int end_of_region = 0;
@@ -249,7 +249,7 @@ int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
                  * check that pages weren't truncated (cl_page_own() returns
                  * error in the latter case).
                  */
-                cfs_spin_unlock(&hdr->coh_page_guard);
+               spin_unlock(&hdr->coh_page_guard);
                 tree_lock = 0;
 
                 for (i = 0; i < j; ++i) {
@@ -268,12 +268,12 @@ int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
                 if (res != CLP_GANG_OKAY)
                         break;
 
-                cfs_spin_lock(&hdr->coh_page_guard);
-                tree_lock = 1;
-        }
-        if (tree_lock)
-                cfs_spin_unlock(&hdr->coh_page_guard);
-        RETURN(res);
+               spin_lock(&hdr->coh_page_guard);
+               tree_lock = 1;
+       }
+       if (tree_lock)
+               spin_unlock(&hdr->coh_page_guard);
+       RETURN(res);
 }
 EXPORT_SYMBOL(cl_page_gang_lookup);
 
@@ -342,12 +342,12 @@ static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
                                                      "cl_page", page);
                 page->cp_index = ind;
                 cl_page_state_set_trust(page, CPS_CACHED);
-               cfs_spin_lock_init(&page->cp_lock);
-                page->cp_type = type;
-                CFS_INIT_LIST_HEAD(&page->cp_layers);
-                CFS_INIT_LIST_HEAD(&page->cp_batch);
-                CFS_INIT_LIST_HEAD(&page->cp_flight);
-                cfs_mutex_init(&page->cp_mutex);
+               spin_lock_init(&page->cp_lock);
+               page->cp_type = type;
+               CFS_INIT_LIST_HEAD(&page->cp_layers);
+               CFS_INIT_LIST_HEAD(&page->cp_batch);
+               CFS_INIT_LIST_HEAD(&page->cp_flight);
+               mutex_init(&page->cp_mutex);
                 lu_ref_init(&page->cp_reference);
                 head = o->co_lu.lo_header;
                 cfs_list_for_each_entry(o, &head->loh_layers,
@@ -459,7 +459,7 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
          * XXX optimization: use radix_tree_preload() here, and change tree
          * gfp mask to GFP_KERNEL in cl_object_header_init().
          */
-        cfs_spin_lock(&hdr->coh_page_guard);
+       spin_lock(&hdr->coh_page_guard);
         err = radix_tree_insert(&hdr->coh_tree, idx, page);
         if (err != 0) {
                 ghost = page;
@@ -487,7 +487,7 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
                 }
                 hdr->coh_pages++;
         }
-        cfs_spin_unlock(&hdr->coh_page_guard);
+       spin_unlock(&hdr->coh_page_guard);
 
         if (unlikely(ghost != NULL)) {
                 cfs_atomic_dec(&site->cs_pages.cs_busy);
@@ -670,7 +670,7 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
                          * inside the cp_lock. So that if it gets here,
                          * it is the REALLY last reference to this page.
                          */
-                        cfs_spin_unlock(&page->cp_lock);
+                       spin_unlock(&page->cp_lock);
 
                         LASSERT(cfs_atomic_read(&page->cp_ref) == 0);
                         PASSERT(env, page, page->cp_owner == NULL);
@@ -684,10 +684,10 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
                         EXIT;
                         return;
                 }
-                cfs_spin_unlock(&page->cp_lock);
-        }
+               spin_unlock(&page->cp_lock);
+       }
 
-        EXIT;
+       EXIT;
 }
 EXPORT_SYMBOL(cl_page_put);
 
@@ -739,16 +739,16 @@ struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
        if (top == NULL)
                RETURN(NULL);
 
-       cfs_spin_lock(&top->cp_lock);
-        for (page = top; page != NULL; page = page->cp_child) {
-                if (cl_object_same(page->cp_obj, obj)) {
-                        cl_page_get_trust(page);
-                        break;
-                }
-        }
-        cfs_spin_unlock(&top->cp_lock);
-        LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
-        RETURN(page);
+       spin_lock(&top->cp_lock);
+       for (page = top; page != NULL; page = page->cp_child) {
+               if (cl_object_same(page->cp_obj, obj)) {
+                       cl_page_get_trust(page);
+                       break;
+               }
+       }
+       spin_unlock(&top->cp_lock);
+       LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
+       RETURN(page);
 }
 EXPORT_SYMBOL(cl_vmpage_page);
 
@@ -1157,13 +1157,13 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
                         struct cl_object_header *hdr;
 
                         hdr = cl_object_header(tmp->cp_obj);
-                        cfs_spin_lock(&hdr->coh_page_guard);
-                        value = radix_tree_delete(&hdr->coh_tree,
-                                                  tmp->cp_index);
-                        PASSERT(env, tmp, value == tmp);
-                        PASSERT(env, tmp, hdr->coh_pages > 0);
-                        hdr->coh_pages--;
-                        cfs_spin_unlock(&hdr->coh_page_guard);
+                       spin_lock(&hdr->coh_page_guard);
+                       value = radix_tree_delete(&hdr->coh_tree,
+                                                 tmp->cp_index);
+                       PASSERT(env, tmp, value == tmp);
+                       PASSERT(env, tmp, hdr->coh_pages > 0);
+                       hdr->coh_pages--;
+                       spin_unlock(&hdr->coh_page_guard);
                 }
         }
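
The coh_page_guard hunks above keep radix-tree inserts and deletes under a plain spinlock, which is why the tree is initialized with a GFP_ATOMIC mask: allocations inside the tree happen with the lock held. A sketch of the same insert-under-lock pattern, with hypothetical names:

    #include <linux/radix-tree.h>
    #include <linux/spinlock.h>

    static RADIX_TREE(demo_tree, GFP_ATOMIC);  /* atomic: inserts run locked */
    static DEFINE_SPINLOCK(demo_tree_guard);

    static int demo_insert(unsigned long index, void *item)
    {
            int err;

            spin_lock(&demo_tree_guard);
            err = radix_tree_insert(&demo_tree, index, item);
            spin_unlock(&demo_tree_guard);
            return err;     /* 0, -EEXIST, or -ENOMEM */
    }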
 
index 8d4b47f..d7f6050 100644
@@ -497,7 +497,7 @@ int obd_init_checks(void)
 #define obd_init_checks() do {} while(0)
 #endif
 
-extern cfs_spinlock_t obd_types_lock;
+extern spinlock_t obd_types_lock;
 extern int class_procfs_init(void);
 extern int class_procfs_clean(void);
 
@@ -517,7 +517,7 @@ int init_obdclass(void)
 
         LCONSOLE_INFO("Lustre: Build Version: "BUILD_VERSION"\n");
 
-        cfs_spin_lock_init(&obd_types_lock);
+       spin_lock_init(&obd_types_lock);
         obd_zombie_impexp_init();
 #ifdef LPROCFS
         obd_memory = lprocfs_alloc_stats(OBD_STATS_NUM,
index 602abb8..e22b7f8 100644
@@ -48,7 +48,7 @@
 #include <lprocfs_status.h>
 
 extern cfs_list_t obd_types;
-cfs_spinlock_t obd_types_lock;
+spinlock_t obd_types_lock;
 
 cfs_mem_cache_t *obd_device_cachep;
 cfs_mem_cache_t *obdo_cachep;
@@ -57,7 +57,7 @@ cfs_mem_cache_t *import_cachep;
 
 cfs_list_t      obd_zombie_imports;
 cfs_list_t      obd_zombie_exports;
-cfs_spinlock_t  obd_zombie_impexp_lock;
+spinlock_t  obd_zombie_impexp_lock;
 static void obd_zombie_impexp_notify(void);
 static void obd_zombie_export_add(struct obd_export *exp);
 static void obd_zombie_import_add(struct obd_import *imp);
@@ -98,19 +98,19 @@ static void obd_device_free(struct obd_device *obd)
 
 struct obd_type *class_search_type(const char *name)
 {
-        cfs_list_t *tmp;
-        struct obd_type *type;
+       cfs_list_t *tmp;
+       struct obd_type *type;
 
-        cfs_spin_lock(&obd_types_lock);
-        cfs_list_for_each(tmp, &obd_types) {
-                type = cfs_list_entry(tmp, struct obd_type, typ_chain);
-                if (strcmp(type->typ_name, name) == 0) {
-                        cfs_spin_unlock(&obd_types_lock);
-                        return type;
-                }
-        }
-        cfs_spin_unlock(&obd_types_lock);
-        return NULL;
+       spin_lock(&obd_types_lock);
+       cfs_list_for_each(tmp, &obd_types) {
+               type = cfs_list_entry(tmp, struct obd_type, typ_chain);
+               if (strcmp(type->typ_name, name) == 0) {
+                       spin_unlock(&obd_types_lock);
+                       return type;
+               }
+       }
+       spin_unlock(&obd_types_lock);
+       return NULL;
 }
 EXPORT_SYMBOL(class_search_type);
 
@@ -135,22 +135,22 @@ struct obd_type *class_get_type(const char *name)
         }
 #endif
         if (type) {
-                cfs_spin_lock(&type->obd_type_lock);
-                type->typ_refcnt++;
-                cfs_try_module_get(type->typ_dt_ops->o_owner);
-                cfs_spin_unlock(&type->obd_type_lock);
-        }
-        return type;
+               spin_lock(&type->obd_type_lock);
+               type->typ_refcnt++;
+               cfs_try_module_get(type->typ_dt_ops->o_owner);
+               spin_unlock(&type->obd_type_lock);
+       }
+       return type;
 }
 EXPORT_SYMBOL(class_get_type);
 
 void class_put_type(struct obd_type *type)
 {
-        LASSERT(type);
-        cfs_spin_lock(&type->obd_type_lock);
-        type->typ_refcnt--;
-        cfs_module_put(type->typ_dt_ops->o_owner);
-        cfs_spin_unlock(&type->obd_type_lock);
+       LASSERT(type);
+       spin_lock(&type->obd_type_lock);
+       type->typ_refcnt--;
+       cfs_module_put(type->typ_dt_ops->o_owner);
+       spin_unlock(&type->obd_type_lock);
 }
 EXPORT_SYMBOL(class_put_type);
 
@@ -191,7 +191,7 @@ int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops,
         if (md_ops)
                 *(type->typ_md_ops) = *md_ops;
         strcpy(type->typ_name, name);
-        cfs_spin_lock_init(&type->obd_type_lock);
+       spin_lock_init(&type->obd_type_lock);
 
 #ifdef LPROCFS
         type->typ_procroot = lprocfs_register(type->typ_name, proc_lustre_root,
@@ -209,9 +209,9 @@ int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops,
                         GOTO (failed, rc);
         }
 
-        cfs_spin_lock(&obd_types_lock);
-        cfs_list_add(&type->typ_chain, &obd_types);
-        cfs_spin_unlock(&obd_types_lock);
+       spin_lock(&obd_types_lock);
+       cfs_list_add(&type->typ_chain, &obd_types);
+       spin_unlock(&obd_types_lock);
 
         RETURN (0);
 
@@ -255,9 +255,9 @@ int class_unregister_type(const char *name)
         if (type->typ_lu)
                 lu_device_type_fini(type->typ_lu);
 
-        cfs_spin_lock(&obd_types_lock);
-        cfs_list_del(&type->typ_chain);
-        cfs_spin_unlock(&obd_types_lock);
+       spin_lock(&obd_types_lock);
+       cfs_list_del(&type->typ_chain);
+       spin_unlock(&obd_types_lock);
         OBD_FREE(type->typ_name, strlen(name) + 1);
         if (type->typ_dt_ops != NULL)
                 OBD_FREE_PTR(type->typ_dt_ops);
@@ -305,7 +305,7 @@ struct obd_device *class_newdev(const char *type_name, const char *name)
 
         LASSERT(newdev->obd_magic == OBD_DEVICE_MAGIC);
 
-        cfs_write_lock(&obd_dev_lock);
+       write_lock(&obd_dev_lock);
         for (i = 0; i < class_devno_max(); i++) {
                 struct obd_device *obd = class_num2obd(i);
 
@@ -337,7 +337,7 @@ struct obd_device *class_newdev(const char *type_name, const char *name)
                         obd_devs[i] = result;
                 }
         }
-        cfs_write_unlock(&obd_dev_lock);
+       write_unlock(&obd_dev_lock);
 
         if (result == NULL && i >= class_devno_max()) {
                 CERROR("all %u OBD devices used, increase MAX_OBD_DEVICES\n",
@@ -372,9 +372,9 @@ void class_release_dev(struct obd_device *obd)
         CDEBUG(D_INFO, "Release obd device %s at %d obd_type name =%s\n",
                obd->obd_name, obd->obd_minor, obd->obd_type->typ_name);
 
-        cfs_write_lock(&obd_dev_lock);
+       write_lock(&obd_dev_lock);
         obd_devs[obd->obd_minor] = NULL;
-        cfs_write_unlock(&obd_dev_lock);
+       write_unlock(&obd_dev_lock);
         obd_device_free(obd);
 
         class_put_type(obd_type);
@@ -387,7 +387,7 @@ int class_name2dev(const char *name)
         if (!name)
                 return -1;
 
-        cfs_read_lock(&obd_dev_lock);
+       read_lock(&obd_dev_lock);
         for (i = 0; i < class_devno_max(); i++) {
                 struct obd_device *obd = class_num2obd(i);
 
@@ -396,13 +396,13 @@ int class_name2dev(const char *name)
                            out any references */
                         LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
                         if (obd->obd_attached) {
-                                cfs_read_unlock(&obd_dev_lock);
+                               read_unlock(&obd_dev_lock);
                                 return i;
                         }
                         break;
                 }
         }
-        cfs_read_unlock(&obd_dev_lock);
+       read_unlock(&obd_dev_lock);
 
         return -1;
 }
@@ -422,17 +422,17 @@ int class_uuid2dev(struct obd_uuid *uuid)
 {
         int i;
 
-        cfs_read_lock(&obd_dev_lock);
+       read_lock(&obd_dev_lock);
         for (i = 0; i < class_devno_max(); i++) {
                 struct obd_device *obd = class_num2obd(i);
 
                 if (obd && obd_uuid_equals(uuid, &obd->obd_uuid)) {
                         LASSERT(obd->obd_magic == OBD_DEVICE_MAGIC);
-                        cfs_read_unlock(&obd_dev_lock);
+                       read_unlock(&obd_dev_lock);
                         return i;
                 }
         }
-        cfs_read_unlock(&obd_dev_lock);
+       read_unlock(&obd_dev_lock);
 
         return -1;
 }
@@ -481,7 +481,7 @@ void class_obd_list(void)
         char *status;
         int i;
 
-        cfs_read_lock(&obd_dev_lock);
+       read_lock(&obd_dev_lock);
         for (i = 0; i < class_devno_max(); i++) {
                 struct obd_device *obd = class_num2obd(i);
 
@@ -500,7 +500,7 @@ void class_obd_list(void)
                          obd->obd_name, obd->obd_uuid.uuid,
                          cfs_atomic_read(&obd->obd_refcount));
         }
-        cfs_read_unlock(&obd_dev_lock);
+       read_unlock(&obd_dev_lock);
         return;
 }
 
@@ -513,7 +513,7 @@ struct obd_device * class_find_client_obd(struct obd_uuid *tgt_uuid,
 {
         int i;
 
-        cfs_read_lock(&obd_dev_lock);
+       read_lock(&obd_dev_lock);
         for (i = 0; i < class_devno_max(); i++) {
                 struct obd_device *obd = class_num2obd(i);
 
@@ -525,12 +525,12 @@ struct obd_device * class_find_client_obd(struct obd_uuid *tgt_uuid,
                                             &obd->u.cli.cl_target_uuid) &&
                             ((grp_uuid)? obd_uuid_equals(grp_uuid,
                                                          &obd->obd_uuid) : 1)) {
-                                cfs_read_unlock(&obd_dev_lock);
+                               read_unlock(&obd_dev_lock);
                                 return obd;
                         }
                 }
         }
-        cfs_read_unlock(&obd_dev_lock);
+       read_unlock(&obd_dev_lock);
 
         return NULL;
 }
@@ -551,7 +551,7 @@ struct obd_device * class_devices_in_group(struct obd_uuid *grp_uuid, int *next)
         else
                 return NULL;
 
-        cfs_read_lock(&obd_dev_lock);
+       read_lock(&obd_dev_lock);
         for (; i < class_devno_max(); i++) {
                 struct obd_device *obd = class_num2obd(i);
 
@@ -560,11 +560,11 @@ struct obd_device * class_devices_in_group(struct obd_uuid *grp_uuid, int *next)
                 if (obd_uuid_equals(grp_uuid, &obd->obd_uuid)) {
                         if (next != NULL)
                                 *next = i+1;
-                        cfs_read_unlock(&obd_dev_lock);
+                       read_unlock(&obd_dev_lock);
                         return obd;
                 }
         }
-        cfs_read_unlock(&obd_dev_lock);
+       read_unlock(&obd_dev_lock);
 
         return NULL;
 }
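
The obd_dev_lock hunks in this file, above and below, drop the cfs_ prefix from the kernel rwlock calls: read_lock() on the many lookup paths so they can run concurrently, write_lock() for exclusive device-table updates. A minimal sketch of that reader/writer split, with hypothetical names:

    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(demo_dev_lock);    /* stand-in for obd_dev_lock */
    static void *demo_devs[16];

    static void *demo_lookup(unsigned int i)
    {
            void *d;

            read_lock(&demo_dev_lock);      /* readers may run in parallel */
            d = demo_devs[i];
            read_unlock(&demo_dev_lock);
            return d;
    }

    static void demo_register(unsigned int i, void *d)
    {
            write_lock(&demo_dev_lock);     /* exclusive for updates */
            demo_devs[i] = d;
            write_unlock(&demo_dev_lock);
    }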
@@ -582,7 +582,7 @@ int class_notify_sptlrpc_conf(const char *fsname, int namelen)
 
         LASSERT(namelen > 0);
 
-        cfs_read_lock(&obd_dev_lock);
+       read_lock(&obd_dev_lock);
         for (i = 0; i < class_devno_max(); i++) {
                 obd = class_num2obd(i);
 
@@ -601,15 +601,15 @@ int class_notify_sptlrpc_conf(const char *fsname, int namelen)
                         continue;
 
                 class_incref(obd, __FUNCTION__, obd);
-                cfs_read_unlock(&obd_dev_lock);
+               read_unlock(&obd_dev_lock);
                 rc2 = obd_set_info_async(NULL, obd->obd_self_export,
                                          sizeof(KEY_SPTLRPC_CONF),
                                          KEY_SPTLRPC_CONF, 0, NULL, NULL);
                 rc = rc ? rc : rc2;
                 class_decref(obd, __FUNCTION__, obd);
-                cfs_read_lock(&obd_dev_lock);
+               read_lock(&obd_dev_lock);
         }
-        cfs_read_unlock(&obd_dev_lock);
+       read_unlock(&obd_dev_lock);
         return rc;
 }
 EXPORT_SYMBOL(class_notify_sptlrpc_conf);
@@ -830,39 +830,39 @@ struct obd_export *class_new_export(struct obd_device *obd,
         cfs_atomic_set(&export->exp_locks_count, 0);
 #if LUSTRE_TRACKS_LOCK_EXP_REFS
         CFS_INIT_LIST_HEAD(&export->exp_locks_list);
-        cfs_spin_lock_init(&export->exp_locks_list_guard);
+       spin_lock_init(&export->exp_locks_list_guard);
 #endif
-        cfs_atomic_set(&export->exp_replay_count, 0);
-        export->exp_obd = obd;
-        CFS_INIT_LIST_HEAD(&export->exp_outstanding_replies);
-        cfs_spin_lock_init(&export->exp_uncommitted_replies_lock);
-        CFS_INIT_LIST_HEAD(&export->exp_uncommitted_replies);
-        CFS_INIT_LIST_HEAD(&export->exp_req_replay_queue);
-        CFS_INIT_LIST_HEAD(&export->exp_handle.h_link);
-        CFS_INIT_LIST_HEAD(&export->exp_hp_rpcs);
+       cfs_atomic_set(&export->exp_replay_count, 0);
+       export->exp_obd = obd;
+       CFS_INIT_LIST_HEAD(&export->exp_outstanding_replies);
+       spin_lock_init(&export->exp_uncommitted_replies_lock);
+       CFS_INIT_LIST_HEAD(&export->exp_uncommitted_replies);
+       CFS_INIT_LIST_HEAD(&export->exp_req_replay_queue);
+       CFS_INIT_LIST_HEAD(&export->exp_handle.h_link);
+       CFS_INIT_LIST_HEAD(&export->exp_hp_rpcs);
        class_handle_hash(&export->exp_handle, &export_handle_ops);
        export->exp_last_request_time = cfs_time_current_sec();
-        cfs_spin_lock_init(&export->exp_lock);
-        cfs_spin_lock_init(&export->exp_rpc_lock);
-        CFS_INIT_HLIST_NODE(&export->exp_uuid_hash);
-        CFS_INIT_HLIST_NODE(&export->exp_nid_hash);
-        cfs_spin_lock_init(&export->exp_bl_list_lock);
-        CFS_INIT_LIST_HEAD(&export->exp_bl_list);
-
-        export->exp_sp_peer = LUSTRE_SP_ANY;
-        export->exp_flvr.sf_rpc = SPTLRPC_FLVR_INVALID;
-        export->exp_client_uuid = *cluuid;
-        obd_init_export(export);
-
-        cfs_spin_lock(&obd->obd_dev_lock);
-         /* shouldn't happen, but might race */
-        if (obd->obd_stopping)
-                GOTO(exit_unlock, rc = -ENODEV);
-
-        hash = cfs_hash_getref(obd->obd_uuid_hash);
-        if (hash == NULL)
-                GOTO(exit_unlock, rc = -ENODEV);
-        cfs_spin_unlock(&obd->obd_dev_lock);
+       spin_lock_init(&export->exp_lock);
+       spin_lock_init(&export->exp_rpc_lock);
+       CFS_INIT_HLIST_NODE(&export->exp_uuid_hash);
+       CFS_INIT_HLIST_NODE(&export->exp_nid_hash);
+       spin_lock_init(&export->exp_bl_list_lock);
+       CFS_INIT_LIST_HEAD(&export->exp_bl_list);
+
+       export->exp_sp_peer = LUSTRE_SP_ANY;
+       export->exp_flvr.sf_rpc = SPTLRPC_FLVR_INVALID;
+       export->exp_client_uuid = *cluuid;
+       obd_init_export(export);
+
+       spin_lock(&obd->obd_dev_lock);
+       /* shouldn't happen, but might race */
+       if (obd->obd_stopping)
+               GOTO(exit_unlock, rc = -ENODEV);
+
+       hash = cfs_hash_getref(obd->obd_uuid_hash);
+       if (hash == NULL)
+               GOTO(exit_unlock, rc = -ENODEV);
+       spin_unlock(&obd->obd_dev_lock);
 
         if (!obd_uuid_equals(cluuid, &obd->obd_uuid)) {
                 rc = cfs_hash_add_unique(hash, cluuid, &export->exp_uuid_hash);
@@ -873,7 +873,7 @@ struct obd_export *class_new_export(struct obd_device *obd,
                 }
         }
 
-        cfs_spin_lock(&obd->obd_dev_lock);
+       spin_lock(&obd->obd_dev_lock);
         if (obd->obd_stopping) {
                 cfs_hash_del(hash, cluuid, &export->exp_uuid_hash);
                 GOTO(exit_unlock, rc = -ENODEV);
@@ -884,12 +884,12 @@ struct obd_export *class_new_export(struct obd_device *obd,
         cfs_list_add_tail(&export->exp_obd_chain_timed,
                           &export->exp_obd->obd_exports_timed);
         export->exp_obd->obd_num_exports++;
-        cfs_spin_unlock(&obd->obd_dev_lock);
-        cfs_hash_putref(hash);
-        RETURN(export);
+       spin_unlock(&obd->obd_dev_lock);
+       cfs_hash_putref(hash);
+       RETURN(export);
 
 exit_unlock:
-        cfs_spin_unlock(&obd->obd_dev_lock);
+       spin_unlock(&obd->obd_dev_lock);
 exit_err:
         if (hash)
                 cfs_hash_putref(hash);
@@ -903,20 +903,20 @@ EXPORT_SYMBOL(class_new_export);
 
 void class_unlink_export(struct obd_export *exp)
 {
-        class_handle_unhash(&exp->exp_handle);
+       class_handle_unhash(&exp->exp_handle);
 
-        cfs_spin_lock(&exp->exp_obd->obd_dev_lock);
-        /* delete an uuid-export hashitem from hashtables */
-        if (!cfs_hlist_unhashed(&exp->exp_uuid_hash))
-                cfs_hash_del(exp->exp_obd->obd_uuid_hash,
-                             &exp->exp_client_uuid,
-                             &exp->exp_uuid_hash);
+       spin_lock(&exp->exp_obd->obd_dev_lock);
+       /* delete an uuid-export hashitem from hashtables */
+       if (!cfs_hlist_unhashed(&exp->exp_uuid_hash))
+               cfs_hash_del(exp->exp_obd->obd_uuid_hash,
+                            &exp->exp_client_uuid,
+                            &exp->exp_uuid_hash);
 
-        cfs_list_move(&exp->exp_obd_chain, &exp->exp_obd->obd_unlinked_exports);
-        cfs_list_del_init(&exp->exp_obd_chain_timed);
-        exp->exp_obd->obd_num_exports--;
-        cfs_spin_unlock(&exp->exp_obd->obd_dev_lock);
-        class_export_put(exp);
+       cfs_list_move(&exp->exp_obd_chain, &exp->exp_obd->obd_unlinked_exports);
+       cfs_list_del_init(&exp->exp_obd_chain_timed);
+       exp->exp_obd->obd_num_exports--;
+       spin_unlock(&exp->exp_obd->obd_dev_lock);
+       class_export_put(exp);
 }
 EXPORT_SYMBOL(class_unlink_export);
 
@@ -1014,11 +1014,11 @@ struct obd_import *class_new_import(struct obd_device *obd)
         CFS_INIT_LIST_HEAD(&imp->imp_replay_list);
         CFS_INIT_LIST_HEAD(&imp->imp_sending_list);
         CFS_INIT_LIST_HEAD(&imp->imp_delayed_list);
-        cfs_spin_lock_init(&imp->imp_lock);
-        imp->imp_last_success_conn = 0;
-        imp->imp_state = LUSTRE_IMP_NEW;
-        imp->imp_obd = class_incref(obd, "import", imp);
-        cfs_mutex_init(&imp->imp_sec_mutex);
+       spin_lock_init(&imp->imp_lock);
+       imp->imp_last_success_conn = 0;
+       imp->imp_state = LUSTRE_IMP_NEW;
+       imp->imp_obd = class_incref(obd, "import", imp);
+       mutex_init(&imp->imp_sec_mutex);
         cfs_waitq_init(&imp->imp_recovery_waitq);
 
         cfs_atomic_set(&imp->imp_refcount, 2);
@@ -1041,15 +1041,15 @@ EXPORT_SYMBOL(class_new_import);
 
 void class_destroy_import(struct obd_import *import)
 {
-        LASSERT(import != NULL);
-        LASSERT(import != LP_POISON);
+       LASSERT(import != NULL);
+       LASSERT(import != LP_POISON);
 
-        class_handle_unhash(&import->imp_handle);
+       class_handle_unhash(&import->imp_handle);
 
-        cfs_spin_lock(&import->imp_lock);
-        import->imp_generation++;
-        cfs_spin_unlock(&import->imp_lock);
-        class_import_put(import);
+       spin_lock(&import->imp_lock);
+       import->imp_generation++;
+       spin_unlock(&import->imp_lock);
+       class_import_put(import);
 }
 EXPORT_SYMBOL(class_destroy_import);
 
@@ -1057,7 +1057,7 @@ EXPORT_SYMBOL(class_destroy_import);
 
 void __class_export_add_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
 {
-        cfs_spin_lock(&exp->exp_locks_list_guard);
+       spin_lock(&exp->exp_locks_list_guard);
 
         LASSERT(lock->l_exp_refs_nr >= 0);
 
@@ -1072,13 +1072,13 @@ void __class_export_add_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
         }
         CDEBUG(D_INFO, "lock = %p, export = %p, refs = %u\n",
                lock, exp, lock->l_exp_refs_nr);
-        cfs_spin_unlock(&exp->exp_locks_list_guard);
+       spin_unlock(&exp->exp_locks_list_guard);
 }
 EXPORT_SYMBOL(__class_export_add_lock_ref);
 
 void __class_export_del_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
 {
-        cfs_spin_lock(&exp->exp_locks_list_guard);
+       spin_lock(&exp->exp_locks_list_guard);
         LASSERT(lock->l_exp_refs_nr > 0);
         if (lock->l_exp_refs_target != exp) {
                 LCONSOLE_WARN("lock %p, "
@@ -1091,7 +1091,7 @@ void __class_export_del_lock_ref(struct obd_export *exp, struct ldlm_lock *lock)
         }
         CDEBUG(D_INFO, "lock = %p, export = %p, refs = %u\n",
                lock, exp, lock->l_exp_refs_nr);
-        cfs_spin_unlock(&exp->exp_locks_list_guard);
+       spin_unlock(&exp->exp_locks_list_guard);
 }
 EXPORT_SYMBOL(__class_export_del_lock_ref);
 #endif
@@ -1125,35 +1125,35 @@ EXPORT_SYMBOL(class_connect);
 /* if export is involved in recovery then clean up related things */
 void class_export_recovery_cleanup(struct obd_export *exp)
 {
-        struct obd_device *obd = exp->exp_obd;
-
-        cfs_spin_lock(&obd->obd_recovery_task_lock);
-        if (exp->exp_delayed)
-                obd->obd_delayed_clients--;
-        if (obd->obd_recovering && exp->exp_in_recovery) {
-                cfs_spin_lock(&exp->exp_lock);
-                exp->exp_in_recovery = 0;
-                cfs_spin_unlock(&exp->exp_lock);
-                LASSERT_ATOMIC_POS(&obd->obd_connected_clients);
-                cfs_atomic_dec(&obd->obd_connected_clients);
-        }
-        cfs_spin_unlock(&obd->obd_recovery_task_lock);
-        /** Cleanup req replay fields */
-        if (exp->exp_req_replay_needed) {
-                cfs_spin_lock(&exp->exp_lock);
-                exp->exp_req_replay_needed = 0;
-                cfs_spin_unlock(&exp->exp_lock);
-                LASSERT(cfs_atomic_read(&obd->obd_req_replay_clients));
-                cfs_atomic_dec(&obd->obd_req_replay_clients);
-        }
-        /** Cleanup lock replay data */
-        if (exp->exp_lock_replay_needed) {
-                cfs_spin_lock(&exp->exp_lock);
-                exp->exp_lock_replay_needed = 0;
-                cfs_spin_unlock(&exp->exp_lock);
-                LASSERT(cfs_atomic_read(&obd->obd_lock_replay_clients));
-                cfs_atomic_dec(&obd->obd_lock_replay_clients);
+       struct obd_device *obd = exp->exp_obd;
+
+       spin_lock(&obd->obd_recovery_task_lock);
+       if (exp->exp_delayed)
+               obd->obd_delayed_clients--;
+       if (obd->obd_recovering && exp->exp_in_recovery) {
+               spin_lock(&exp->exp_lock);
+               exp->exp_in_recovery = 0;
+               spin_unlock(&exp->exp_lock);
+               LASSERT_ATOMIC_POS(&obd->obd_connected_clients);
+               cfs_atomic_dec(&obd->obd_connected_clients);
+       }
+       spin_unlock(&obd->obd_recovery_task_lock);
+       /** Cleanup req replay fields */
+       if (exp->exp_req_replay_needed) {
+               spin_lock(&exp->exp_lock);
+               exp->exp_req_replay_needed = 0;
+               spin_unlock(&exp->exp_lock);
+               LASSERT(cfs_atomic_read(&obd->obd_req_replay_clients));
+               cfs_atomic_dec(&obd->obd_req_replay_clients);
         }
+       /** Cleanup lock replay data */
+       if (exp->exp_lock_replay_needed) {
+               spin_lock(&exp->exp_lock);
+               exp->exp_lock_replay_needed = 0;
+               spin_unlock(&exp->exp_lock);
+               LASSERT(cfs_atomic_read(&obd->obd_lock_replay_clients));
+               cfs_atomic_dec(&obd->obd_lock_replay_clients);
+       }
 }
 
 /* This function removes 1-3 references from the export:
@@ -1172,10 +1172,10 @@ int class_disconnect(struct obd_export *export)
                 RETURN(-EINVAL);
         }
 
-        cfs_spin_lock(&export->exp_lock);
-        already_disconnected = export->exp_disconnected;
-        export->exp_disconnected = 1;
-        cfs_spin_unlock(&export->exp_lock);
+       spin_lock(&export->exp_lock);
+       already_disconnected = export->exp_disconnected;
+       export->exp_disconnected = 1;
+       spin_unlock(&export->exp_lock);
 
         /* class_cleanup(), abort_recovery(), and class_fail_export()
          * all end up in here, and if any of them race we shouldn't
@@ -1204,14 +1204,14 @@ EXPORT_SYMBOL(class_disconnect);
 /* Return non-zero for a fully connected export */
 int class_connected_export(struct obd_export *exp)
 {
-        if (exp) {
-                int connected;
-                cfs_spin_lock(&exp->exp_lock);
-                connected = (exp->exp_conn_cnt > 0);
-                cfs_spin_unlock(&exp->exp_lock);
-                return connected;
-        }
-        return 0;
+       if (exp) {
+               int connected;
+               spin_lock(&exp->exp_lock);
+               connected = (exp->exp_conn_cnt > 0);
+               spin_unlock(&exp->exp_lock);
+               return connected;
+       }
+       return 0;
 }
 EXPORT_SYMBOL(class_connected_export);
 
@@ -1230,9 +1230,9 @@ static void class_disconnect_export_list(cfs_list_t *list,
                 /* need for safe call CDEBUG after obd_disconnect */
                 class_export_get(exp);
 
-                cfs_spin_lock(&exp->exp_lock);
-                exp->exp_flags = flags;
-                cfs_spin_unlock(&exp->exp_lock);
+               spin_lock(&exp->exp_lock);
+               exp->exp_flags = flags;
+               spin_unlock(&exp->exp_lock);
 
                 if (obd_uuid_equals(&exp->exp_client_uuid,
                                     &exp->exp_obd->obd_uuid)) {
@@ -1263,15 +1263,15 @@ static void class_disconnect_export_list(cfs_list_t *list,
 
 void class_disconnect_exports(struct obd_device *obd)
 {
-        cfs_list_t work_list;
-        ENTRY;
+       cfs_list_t work_list;
+       ENTRY;
 
-        /* Move all of the exports from obd_exports to a work list, en masse. */
-        CFS_INIT_LIST_HEAD(&work_list);
-        cfs_spin_lock(&obd->obd_dev_lock);
-        cfs_list_splice_init(&obd->obd_exports, &work_list);
-        cfs_list_splice_init(&obd->obd_delayed_exports, &work_list);
-        cfs_spin_unlock(&obd->obd_dev_lock);
+       /* Move all of the exports from obd_exports to a work list, en masse. */
+       CFS_INIT_LIST_HEAD(&work_list);
+       spin_lock(&obd->obd_dev_lock);
+       cfs_list_splice_init(&obd->obd_exports, &work_list);
+       cfs_list_splice_init(&obd->obd_delayed_exports, &work_list);
+       spin_unlock(&obd->obd_dev_lock);
 
         if (!cfs_list_empty(&work_list)) {
                 CDEBUG(D_HA, "OBD device %d (%p) has exports, "
@@ -1296,7 +1296,7 @@ void class_disconnect_stale_exports(struct obd_device *obd,
         ENTRY;
 
         CFS_INIT_LIST_HEAD(&work_list);
-        cfs_spin_lock(&obd->obd_dev_lock);
+       spin_lock(&obd->obd_dev_lock);
        cfs_list_for_each_entry_safe(exp, n, &obd->obd_exports,
                                     exp_obd_chain) {
                 /* don't count self-export as client */
@@ -1309,13 +1309,13 @@ void class_disconnect_stale_exports(struct obd_device *obd,
                if (exp->exp_target_data.ted_lr_idx == -1)
                        continue;
 
-               cfs_spin_lock(&exp->exp_lock);
+               spin_lock(&exp->exp_lock);
                if (test_export(exp)) {
-                       cfs_spin_unlock(&exp->exp_lock);
+                       spin_unlock(&exp->exp_lock);
                        continue;
                }
                exp->exp_failed = 1;
-               cfs_spin_unlock(&exp->exp_lock);
+               spin_unlock(&exp->exp_lock);
 
                 cfs_list_move(&exp->exp_obd_chain, &work_list);
                 evicted++;
@@ -1325,7 +1325,7 @@ void class_disconnect_stale_exports(struct obd_device *obd,
                        libcfs_nid2str(exp->exp_connection->c_peer.nid));
                 print_export_data(exp, "EVICTING", 0);
         }
-        cfs_spin_unlock(&obd->obd_dev_lock);
+       spin_unlock(&obd->obd_dev_lock);
 
         if (evicted) {
                 LCONSOLE_WARN("%s: disconnecting %d stale clients\n",
@@ -1340,12 +1340,12 @@ EXPORT_SYMBOL(class_disconnect_stale_exports);
 
 void class_fail_export(struct obd_export *exp)
 {
-        int rc, already_failed;
+       int rc, already_failed;
 
-        cfs_spin_lock(&exp->exp_lock);
-        already_failed = exp->exp_failed;
-        exp->exp_failed = 1;
-        cfs_spin_unlock(&exp->exp_lock);
+       spin_lock(&exp->exp_lock);
+       already_failed = exp->exp_failed;
+       exp->exp_failed = 1;
+       spin_unlock(&exp->exp_lock);
 
         if (already_failed) {
                 CDEBUG(D_HA, "disconnecting dead export %p/%s; skipping\n",
@@ -1388,23 +1388,23 @@ EXPORT_SYMBOL(obd_export_nid2str);
 int obd_export_evict_by_nid(struct obd_device *obd, const char *nid)
 {
        cfs_hash_t *nid_hash;
-        struct obd_export *doomed_exp = NULL;
-        int exports_evicted = 0;
+       struct obd_export *doomed_exp = NULL;
+       int exports_evicted = 0;
 
-        lnet_nid_t nid_key = libcfs_str2nid((char *)nid);
+       lnet_nid_t nid_key = libcfs_str2nid((char *)nid);
 
-       cfs_spin_lock(&obd->obd_dev_lock);
+       spin_lock(&obd->obd_dev_lock);
        /* umount has run already, so the evict thread should leave
         * its task to the umount thread now */
        if (obd->obd_stopping) {
-               cfs_spin_unlock(&obd->obd_dev_lock);
+               spin_unlock(&obd->obd_dev_lock);
                return exports_evicted;
        }
        nid_hash = obd->obd_nid_hash;
        cfs_hash_getref(nid_hash);
-       cfs_spin_unlock(&obd->obd_dev_lock);
+       spin_unlock(&obd->obd_dev_lock);
 
-        do {
+       do {
                doomed_exp = cfs_hash_lookup(nid_hash, &nid_key);
                 if (doomed_exp == NULL)
                         break;
@@ -1435,18 +1435,18 @@ EXPORT_SYMBOL(obd_export_evict_by_nid);
 int obd_export_evict_by_uuid(struct obd_device *obd, const char *uuid)
 {
        cfs_hash_t *uuid_hash;
-        struct obd_export *doomed_exp = NULL;
-        struct obd_uuid doomed_uuid;
-        int exports_evicted = 0;
+       struct obd_export *doomed_exp = NULL;
+       struct obd_uuid doomed_uuid;
+       int exports_evicted = 0;
 
-       cfs_spin_lock(&obd->obd_dev_lock);
+       spin_lock(&obd->obd_dev_lock);
        if (obd->obd_stopping) {
-               cfs_spin_unlock(&obd->obd_dev_lock);
+               spin_unlock(&obd->obd_dev_lock);
                return exports_evicted;
        }
        uuid_hash = obd->obd_uuid_hash;
        cfs_hash_getref(uuid_hash);
-       cfs_spin_unlock(&obd->obd_dev_lock);
+       spin_unlock(&obd->obd_dev_lock);
 
         obd_str2uuid(&doomed_uuid, uuid);
         if (obd_uuid_equals(&doomed_uuid, &obd->obd_uuid)) {
@@ -1479,20 +1479,20 @@ EXPORT_SYMBOL(class_export_dump_hook);
 #endif
 
 static void print_export_data(struct obd_export *exp, const char *status,
-                              int locks)
-{
-        struct ptlrpc_reply_state *rs;
-        struct ptlrpc_reply_state *first_reply = NULL;
-        int nreplies = 0;
-
-        cfs_spin_lock(&exp->exp_lock);
-        cfs_list_for_each_entry(rs, &exp->exp_outstanding_replies,
-                                rs_exp_list) {
-                if (nreplies == 0)
-                        first_reply = rs;
-                nreplies++;
-        }
-        cfs_spin_unlock(&exp->exp_lock);
+                             int locks)
+{
+       struct ptlrpc_reply_state *rs;
+       struct ptlrpc_reply_state *first_reply = NULL;
+       int nreplies = 0;
+
+       spin_lock(&exp->exp_lock);
+       cfs_list_for_each_entry(rs, &exp->exp_outstanding_replies,
+                               rs_exp_list) {
+               if (nreplies == 0)
+                       first_reply = rs;
+               nreplies++;
+       }
+       spin_unlock(&exp->exp_lock);
 
         CDEBUG(D_HA, "%s: %s %p %s %s %d (%d %d %d) %d %d %d %d: %p %s "LPU64"\n",
                exp->exp_obd->obd_name, status, exp, exp->exp_client_uuid.uuid,
@@ -1513,28 +1513,28 @@ void dump_exports(struct obd_device *obd, int locks)
 {
         struct obd_export *exp;
 
-        cfs_spin_lock(&obd->obd_dev_lock);
-        cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain)
-                print_export_data(exp, "ACTIVE", locks);
-        cfs_list_for_each_entry(exp, &obd->obd_unlinked_exports, exp_obd_chain)
-                print_export_data(exp, "UNLINKED", locks);
-        cfs_list_for_each_entry(exp, &obd->obd_delayed_exports, exp_obd_chain)
-                print_export_data(exp, "DELAYED", locks);
-        cfs_spin_unlock(&obd->obd_dev_lock);
-        cfs_spin_lock(&obd_zombie_impexp_lock);
-        cfs_list_for_each_entry(exp, &obd_zombie_exports, exp_obd_chain)
-                print_export_data(exp, "ZOMBIE", locks);
-        cfs_spin_unlock(&obd_zombie_impexp_lock);
+       spin_lock(&obd->obd_dev_lock);
+       cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain)
+               print_export_data(exp, "ACTIVE", locks);
+       cfs_list_for_each_entry(exp, &obd->obd_unlinked_exports, exp_obd_chain)
+               print_export_data(exp, "UNLINKED", locks);
+       cfs_list_for_each_entry(exp, &obd->obd_delayed_exports, exp_obd_chain)
+               print_export_data(exp, "DELAYED", locks);
+       spin_unlock(&obd->obd_dev_lock);
+       spin_lock(&obd_zombie_impexp_lock);
+       cfs_list_for_each_entry(exp, &obd_zombie_exports, exp_obd_chain)
+               print_export_data(exp, "ZOMBIE", locks);
+       spin_unlock(&obd_zombie_impexp_lock);
 }
 EXPORT_SYMBOL(dump_exports);
 
 void obd_exports_barrier(struct obd_device *obd)
 {
-        int waited = 2;
-        LASSERT(cfs_list_empty(&obd->obd_exports));
-        cfs_spin_lock(&obd->obd_dev_lock);
-        while (!cfs_list_empty(&obd->obd_unlinked_exports)) {
-                cfs_spin_unlock(&obd->obd_dev_lock);
+       int waited = 2;
+       LASSERT(cfs_list_empty(&obd->obd_exports));
+       spin_lock(&obd->obd_dev_lock);
+       while (!cfs_list_empty(&obd->obd_unlinked_exports)) {
+               spin_unlock(&obd->obd_dev_lock);
                 cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
                                                    cfs_time_seconds(waited));
                 if (waited > 5 && IS_PO2(waited)) {
@@ -1546,9 +1546,9 @@ void obd_exports_barrier(struct obd_device *obd)
                         dump_exports(obd, 1);
                 }
                 waited *= 2;
-                cfs_spin_lock(&obd->obd_dev_lock);
-        }
-        cfs_spin_unlock(&obd->obd_dev_lock);
+               spin_lock(&obd->obd_dev_lock);
+       }
+       spin_unlock(&obd->obd_dev_lock);
 }
 EXPORT_SYMBOL(obd_exports_barrier);
 
@@ -1560,12 +1560,12 @@ static int zombies_count = 0;
  */
 void obd_zombie_impexp_cull(void)
 {
-        struct obd_import *import;
-        struct obd_export *export;
-        ENTRY;
+       struct obd_import *import;
+       struct obd_export *export;
+       ENTRY;
 
-        do {
-                cfs_spin_lock(&obd_zombie_impexp_lock);
+       do {
+               spin_lock(&obd_zombie_impexp_lock);
 
                 import = NULL;
                 if (!cfs_list_empty(&obd_zombie_imports)) {
@@ -1583,35 +1583,35 @@ void obd_zombie_impexp_cull(void)
                         cfs_list_del_init(&export->exp_obd_chain);
                 }
 
-                cfs_spin_unlock(&obd_zombie_impexp_lock);
+               spin_unlock(&obd_zombie_impexp_lock);
 
-                if (import != NULL) {
-                        class_import_destroy(import);
-                        cfs_spin_lock(&obd_zombie_impexp_lock);
-                        zombies_count--;
-                        cfs_spin_unlock(&obd_zombie_impexp_lock);
-                }
+               if (import != NULL) {
+                       class_import_destroy(import);
+                       spin_lock(&obd_zombie_impexp_lock);
+                       zombies_count--;
+                       spin_unlock(&obd_zombie_impexp_lock);
+               }
 
-                if (export != NULL) {
-                        class_export_destroy(export);
-                        cfs_spin_lock(&obd_zombie_impexp_lock);
-                        zombies_count--;
-                        cfs_spin_unlock(&obd_zombie_impexp_lock);
-                }
+               if (export != NULL) {
+                       class_export_destroy(export);
+                       spin_lock(&obd_zombie_impexp_lock);
+                       zombies_count--;
+                       spin_unlock(&obd_zombie_impexp_lock);
+               }
 
-                cfs_cond_resched();
-        } while (import != NULL || export != NULL);
-        EXIT;
+               cfs_cond_resched();
+       } while (import != NULL || export != NULL);
+       EXIT;
 }
 
-static cfs_completion_t         obd_zombie_start;
-static cfs_completion_t         obd_zombie_stop;
-static unsigned long            obd_zombie_flags;
-static cfs_waitq_t              obd_zombie_waitq;
-static pid_t                    obd_zombie_pid;
+static struct completion       obd_zombie_start;
+static struct completion       obd_zombie_stop;
+static unsigned long           obd_zombie_flags;
+static cfs_waitq_t             obd_zombie_waitq;
+static pid_t                   obd_zombie_pid;
 
 enum {
-        OBD_ZOMBIE_STOP   = 1 << 1
+       OBD_ZOMBIE_STOP         = 0x0001,
 };
 
 /**
@@ -1619,45 +1619,45 @@ enum {
  */
 static int obd_zombie_impexp_check(void *arg)
 {
-        int rc;
+       int rc;
 
-        cfs_spin_lock(&obd_zombie_impexp_lock);
-        rc = (zombies_count == 0) &&
-             !cfs_test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags);
-        cfs_spin_unlock(&obd_zombie_impexp_lock);
+       spin_lock(&obd_zombie_impexp_lock);
+       rc = (zombies_count == 0) &&
+            !test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags);
+       spin_unlock(&obd_zombie_impexp_lock);
 
-        RETURN(rc);
+       RETURN(rc);
 }
 
 /**
  * Add export to the obd_zombie thread and notify it.
  */
 static void obd_zombie_export_add(struct obd_export *exp) {
-        cfs_spin_lock(&exp->exp_obd->obd_dev_lock);
-        LASSERT(!cfs_list_empty(&exp->exp_obd_chain));
-        cfs_list_del_init(&exp->exp_obd_chain);
-        cfs_spin_unlock(&exp->exp_obd->obd_dev_lock);
-        cfs_spin_lock(&obd_zombie_impexp_lock);
-        zombies_count++;
-        cfs_list_add(&exp->exp_obd_chain, &obd_zombie_exports);
-        cfs_spin_unlock(&obd_zombie_impexp_lock);
+       spin_lock(&exp->exp_obd->obd_dev_lock);
+       LASSERT(!cfs_list_empty(&exp->exp_obd_chain));
+       cfs_list_del_init(&exp->exp_obd_chain);
+       spin_unlock(&exp->exp_obd->obd_dev_lock);
+       spin_lock(&obd_zombie_impexp_lock);
+       zombies_count++;
+       cfs_list_add(&exp->exp_obd_chain, &obd_zombie_exports);
+       spin_unlock(&obd_zombie_impexp_lock);
 
-        obd_zombie_impexp_notify();
+       obd_zombie_impexp_notify();
 }
 
 /**
  * Add import to the obd_zombie thread and notify it.
  */
 static void obd_zombie_import_add(struct obd_import *imp) {
-        LASSERT(imp->imp_sec == NULL);
-        LASSERT(imp->imp_rq_pool == NULL);
-        cfs_spin_lock(&obd_zombie_impexp_lock);
-        LASSERT(cfs_list_empty(&imp->imp_zombie_chain));
-        zombies_count++;
-        cfs_list_add(&imp->imp_zombie_chain, &obd_zombie_imports);
-        cfs_spin_unlock(&obd_zombie_impexp_lock);
+       LASSERT(imp->imp_sec == NULL);
+       LASSERT(imp->imp_rq_pool == NULL);
+       spin_lock(&obd_zombie_impexp_lock);
+       LASSERT(cfs_list_empty(&imp->imp_zombie_chain));
+       zombies_count++;
+       cfs_list_add(&imp->imp_zombie_chain, &obd_zombie_imports);
+       spin_unlock(&obd_zombie_impexp_lock);
 
-        obd_zombie_impexp_notify();
+       obd_zombie_impexp_notify();
 }
 
 /**
@@ -1678,13 +1678,13 @@ static void obd_zombie_impexp_notify(void)
  */
 static int obd_zombie_is_idle(void)
 {
-        int rc;
+       int rc;
 
-        LASSERT(!cfs_test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags));
-        cfs_spin_lock(&obd_zombie_impexp_lock);
-        rc = (zombies_count == 0);
-        cfs_spin_unlock(&obd_zombie_impexp_lock);
-        return rc;
+       LASSERT(!test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags));
+       spin_lock(&obd_zombie_impexp_lock);
+       rc = (zombies_count == 0);
+       spin_unlock(&obd_zombie_impexp_lock);
+       return rc;
 }
 
 /**
@@ -1708,18 +1708,19 @@ EXPORT_SYMBOL(obd_zombie_barrier);
  */
 static int obd_zombie_impexp_thread(void *unused)
 {
-        int rc;
+       int rc;
 
-        if ((rc = cfs_daemonize_ctxt("obd_zombid"))) {
-                cfs_complete(&obd_zombie_start);
-                RETURN(rc);
-        }
+       rc = cfs_daemonize_ctxt("obd_zombid");
+       if (rc != 0) {
+               complete(&obd_zombie_start);
+               RETURN(rc);
+       }
 
-        cfs_complete(&obd_zombie_start);
+       complete(&obd_zombie_start);
 
-        obd_zombie_pid = cfs_curproc_pid();
+       obd_zombie_pid = cfs_curproc_pid();
 
-        while(!cfs_test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags)) {
+       while (!test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags)) {
                 struct l_wait_info lwi = { 0 };
 
                 l_wait_event(obd_zombie_waitq,
@@ -1733,9 +1734,9 @@ static int obd_zombie_impexp_thread(void *unused)
                 cfs_waitq_signal(&obd_zombie_waitq);
         }
 
-        cfs_complete(&obd_zombie_stop);
+       complete(&obd_zombie_stop);
 
-        RETURN(0);
+       RETURN(0);
 }
 
 #else /* ! KERNEL */
@@ -1763,22 +1764,22 @@ int obd_zombie_impexp_kill(void *arg)
  */
 int obd_zombie_impexp_init(void)
 {
-        int rc;
+       int rc;
 
-        CFS_INIT_LIST_HEAD(&obd_zombie_imports);
-        CFS_INIT_LIST_HEAD(&obd_zombie_exports);
-        cfs_spin_lock_init(&obd_zombie_impexp_lock);
-        cfs_init_completion(&obd_zombie_start);
-        cfs_init_completion(&obd_zombie_stop);
-        cfs_waitq_init(&obd_zombie_waitq);
-        obd_zombie_pid = 0;
+       CFS_INIT_LIST_HEAD(&obd_zombie_imports);
+       CFS_INIT_LIST_HEAD(&obd_zombie_exports);
+       spin_lock_init(&obd_zombie_impexp_lock);
+       init_completion(&obd_zombie_start);
+       init_completion(&obd_zombie_stop);
+       cfs_waitq_init(&obd_zombie_waitq);
+       obd_zombie_pid = 0;
 
 #ifdef __KERNEL__
-        rc = cfs_create_thread(obd_zombie_impexp_thread, NULL, 0);
-        if (rc < 0)
-                RETURN(rc);
+       rc = cfs_create_thread(obd_zombie_impexp_thread, NULL, 0);
+       if (rc < 0)
+               RETURN(rc);
 
-        cfs_wait_for_completion(&obd_zombie_start);
+       wait_for_completion(&obd_zombie_start);
 #else
 
         obd_zombie_impexp_work_cb =
@@ -1797,10 +1798,10 @@ int obd_zombie_impexp_init(void)
  */
 void obd_zombie_impexp_stop(void)
 {
-        cfs_set_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags);
+       set_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags);
         obd_zombie_impexp_notify();
 #ifdef __KERNEL__
-        cfs_wait_for_completion(&obd_zombie_stop);
+       wait_for_completion(&obd_zombie_stop);
 #else
         liblustre_deregister_wait_callback(obd_zombie_impexp_work_cb);
         liblustre_deregister_idle_callback(obd_zombie_impexp_idle_cb);
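
The obd_zombie start/stop handshake above is the plain-kernel completion
pattern this patch converts to: the starter blocks in wait_for_completion()
until the new thread signals complete(), and teardown does the same in the
other direction.  A minimal sketch of that handshake, assuming a
kernel-module context; the names are illustrative, kthread_run() stands in
for the cfs_create_thread()/cfs_daemonize_ctxt() pair still used above, and
a simple timed poll replaces the l_wait_event() logic:

        #include <linux/completion.h>
        #include <linux/kthread.h>
        #include <linux/bitops.h>
        #include <linux/sched.h>
        #include <linux/err.h>

        static struct completion worker_started;
        static struct completion worker_stopped;
        static unsigned long worker_flags;

        #define WORKER_STOP     0       /* bit number for test_bit/set_bit */

        static int worker(void *unused)
        {
                complete(&worker_started);      /* let the starter proceed */
                while (!test_bit(WORKER_STOP, &worker_flags))
                        schedule_timeout_interruptible(HZ);
                complete(&worker_stopped);      /* wake up the stopper */
                return 0;
        }

        static int worker_start(void)
        {
                struct task_struct *tsk;

                init_completion(&worker_started);
                init_completion(&worker_stopped);
                tsk = kthread_run(worker, NULL, "worker");
                if (IS_ERR(tsk))
                        return PTR_ERR(tsk);
                wait_for_completion(&worker_started);
                return 0;
        }

        static void worker_stop(void)
        {
                set_bit(WORKER_STOP, &worker_flags);
                wait_for_completion(&worker_stopped);
        }
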
index 3e36f46..605c039 100644
@@ -299,22 +299,22 @@ static __u32 idmap_lookup_gid(cfs_list_t *hash, int reverse, __u32 gid)
 }
 
 int lustre_idmap_add(struct lustre_idmap_table *t,
-                     uid_t ruid, uid_t luid,
-                     gid_t rgid, gid_t lgid)
+                    uid_t ruid, uid_t luid,
+                    gid_t rgid, gid_t lgid)
 {
-        struct lustre_idmap_entry *e0, *e1;
+       struct lustre_idmap_entry *e0, *e1;
 
-        LASSERT(t);
+       LASSERT(t);
 
-        cfs_spin_lock(&t->lit_lock);
-        e0 = idmap_search_entry(t, ruid, luid, rgid, lgid);
-        cfs_spin_unlock(&t->lit_lock);
-        if (!e0) {
-                e0 = idmap_entry_alloc(ruid, luid, rgid, lgid);
-                if (!e0)
-                        return -ENOMEM;
+       spin_lock(&t->lit_lock);
+       e0 = idmap_search_entry(t, ruid, luid, rgid, lgid);
+       spin_unlock(&t->lit_lock);
+       if (!e0) {
+               e0 = idmap_entry_alloc(ruid, luid, rgid, lgid);
+               if (!e0)
+                       return -ENOMEM;
 
-                cfs_spin_lock(&t->lit_lock);
+               spin_lock(&t->lit_lock);
                 e1 = idmap_search_entry(t, ruid, luid, rgid, lgid);
                 if (e1 == NULL) {
                         cfs_list_add_tail(&e0->lie_rmt_uid_hash,
@@ -330,7 +330,7 @@ int lustre_idmap_add(struct lustre_idmap_table *t,
                                           &t->lit_idmaps[LCL_GIDMAP_IDX]
                                           [lustre_idmap_hashfunc(lgid)]);
                 }
-                cfs_spin_unlock(&t->lit_lock);
+               spin_unlock(&t->lit_lock);
                 if (e1 != NULL) {
                         idmap_entry_free(e0);
                         if (IS_ERR(e1))
@@ -348,20 +348,20 @@ int lustre_idmap_del(struct lustre_idmap_table *t,
                     uid_t ruid, uid_t luid,
                     gid_t rgid, gid_t lgid)
 {
-        struct lustre_idmap_entry *e;
-        int rc = 0;
+       struct lustre_idmap_entry *e;
+       int rc = 0;
 
-        LASSERT(t);
+       LASSERT(t);
 
-        cfs_spin_lock(&t->lit_lock);
-        e = idmap_search_entry(t, ruid, luid, rgid, lgid);
-        if (IS_ERR(e))
-                rc = PTR_ERR(e);
-        else if (e)
-                idmap_entry_free(e);
-        cfs_spin_unlock(&t->lit_lock);
+       spin_lock(&t->lit_lock);
+       e = idmap_search_entry(t, ruid, luid, rgid, lgid);
+       if (IS_ERR(e))
+               rc = PTR_ERR(e);
+       else if (e)
+               idmap_entry_free(e);
+       spin_unlock(&t->lit_lock);
 
-        return rc;
+       return rc;
 }
 EXPORT_SYMBOL(lustre_idmap_del);
 
@@ -390,11 +390,11 @@ int lustre_idmap_lookup_uid(struct md_ucred *mu,
 
         hash = t->lit_idmaps[reverse ? LCL_UIDMAP_IDX : RMT_UIDMAP_IDX];
 
-        cfs_spin_lock(&t->lit_lock);
-        uid = idmap_lookup_uid(hash, reverse, uid);
-        cfs_spin_unlock(&t->lit_lock);
+       spin_lock(&t->lit_lock);
+       uid = idmap_lookup_uid(hash, reverse, uid);
+       spin_unlock(&t->lit_lock);
 
-        return uid;
+       return uid;
 }
 EXPORT_SYMBOL(lustre_idmap_lookup_uid);
 
@@ -422,11 +422,11 @@ int lustre_idmap_lookup_gid(struct md_ucred *mu, struct lustre_idmap_table *t,
 
         hash = t->lit_idmaps[reverse ? LCL_GIDMAP_IDX : RMT_GIDMAP_IDX];
 
-        cfs_spin_lock(&t->lit_lock);
-        gid = idmap_lookup_gid(hash, reverse, gid);
-        cfs_spin_unlock(&t->lit_lock);
+       spin_lock(&t->lit_lock);
+       gid = idmap_lookup_gid(hash, reverse, gid);
+       spin_unlock(&t->lit_lock);
 
-        return gid;
+       return gid;
 }
 EXPORT_SYMBOL(lustre_idmap_lookup_gid);
 
@@ -439,12 +439,12 @@ struct lustre_idmap_table *lustre_idmap_init(void)
         if(unlikely(t == NULL))
                 return (ERR_PTR(-ENOMEM));
 
-        cfs_spin_lock_init(&t->lit_lock);
-        for (i = 0; i < ARRAY_SIZE(t->lit_idmaps); i++)
-                for (j = 0; j < ARRAY_SIZE(t->lit_idmaps[i]); j++)
-                        CFS_INIT_LIST_HEAD(&t->lit_idmaps[i][j]);
+       spin_lock_init(&t->lit_lock);
+       for (i = 0; i < ARRAY_SIZE(t->lit_idmaps); i++)
+               for (j = 0; j < ARRAY_SIZE(t->lit_idmaps[i]); j++)
+                       CFS_INIT_LIST_HEAD(&t->lit_idmaps[i][j]);
 
-        return t;
+       return t;
 }
 EXPORT_SYMBOL(lustre_idmap_init);
 
@@ -456,16 +456,16 @@ void lustre_idmap_fini(struct lustre_idmap_table *t)
         LASSERT(t);
 
         list = t->lit_idmaps[RMT_UIDMAP_IDX];
-        cfs_spin_lock(&t->lit_lock);
-        for (i = 0; i < CFS_IDMAP_HASHSIZE; i++)
-                while (!cfs_list_empty(&list[i])) {
-                        e = cfs_list_entry(list[i].next,
-                                           struct lustre_idmap_entry,
-                                           lie_rmt_uid_hash);
-                        idmap_entry_free(e);
-                }
-        cfs_spin_unlock(&t->lit_lock);
-
-        OBD_FREE_PTR(t);
+       spin_lock(&t->lit_lock);
+       for (i = 0; i < CFS_IDMAP_HASHSIZE; i++)
+               while (!cfs_list_empty(&list[i])) {
+                       e = cfs_list_entry(list[i].next,
+                                          struct lustre_idmap_entry,
+                                          lie_rmt_uid_hash);
+                       idmap_entry_free(e);
+               }
+       spin_unlock(&t->lit_lock);
+
+       OBD_FREE_PTR(t);
 }
 EXPORT_SYMBOL(lustre_idmap_fini);
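
lustre_idmap_add() above is a standard allocate-outside-the-lock insert:
search under the spinlock, drop the lock to allocate (kzalloc with
GFP_KERNEL may sleep, which is forbidden while a spinlock is held), then
re-search under the lock before linking the new entry in, discarding it if
a racing thread won.  A stripped-down sketch of the pattern, with a
hypothetical entry type and list:

        #include <linux/spinlock.h>
        #include <linux/list.h>
        #include <linux/slab.h>
        #include <linux/errno.h>

        static DEFINE_SPINLOCK(tbl_lock);
        static LIST_HEAD(tbl);

        struct entry {
                struct list_head link;
                int key;
        };

        static struct entry *find_locked(int key)  /* caller holds tbl_lock */
        {
                struct entry *e;

                list_for_each_entry(e, &tbl, link)
                        if (e->key == key)
                                return e;
                return NULL;
        }

        static int add_entry(int key)
        {
                struct entry *e0, *e1;

                spin_lock(&tbl_lock);
                e1 = find_locked(key);
                spin_unlock(&tbl_lock);
                if (e1 != NULL)
                        return 0;               /* already present */

                e0 = kzalloc(sizeof(*e0), GFP_KERNEL); /* may sleep: no lock held */
                if (e0 == NULL)
                        return -ENOMEM;
                e0->key = key;

                spin_lock(&tbl_lock);
                e1 = find_locked(key);          /* re-check: did we lose a race? */
                if (e1 == NULL)
                        list_add(&e0->link, &tbl);
                spin_unlock(&tbl_lock);
                if (e1 != NULL)
                        kfree(e0);              /* duplicate; discard ours */
                return 0;
        }
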
index cae8397..85c994a 100644
@@ -271,7 +271,7 @@ static int obd_proc_read_health(char *page, char **start, off_t off,
         if (libcfs_catastrophe)
                 rc += snprintf(page + rc, count - rc, "LBUG\n");
 
-        cfs_read_lock(&obd_dev_lock);
+       read_lock(&obd_dev_lock);
         for (i = 0; i < class_devno_max(); i++) {
                 struct obd_device *obd;
 
@@ -284,7 +284,7 @@ static int obd_proc_read_health(char *page, char **start, off_t off,
                         continue;
 
                 class_incref(obd, __FUNCTION__, cfs_current());
-                cfs_read_unlock(&obd_dev_lock);
+               read_unlock(&obd_dev_lock);
 
                 if (obd_health_check(NULL, obd)) {
                         rc += snprintf(page + rc, count - rc,
@@ -292,9 +292,9 @@ static int obd_proc_read_health(char *page, char **start, off_t off,
                                        obd->obd_name);
                 }
                 class_decref(obd, __FUNCTION__, cfs_current());
-                cfs_read_lock(&obd_dev_lock);
+               read_lock(&obd_dev_lock);
         }
-        cfs_read_unlock(&obd_dev_lock);
+       read_unlock(&obd_dev_lock);
 
         if (rc == 0)
                 return snprintf(page, count, "healthy\n");
index 6892d1d..c16f143 100644
@@ -168,7 +168,7 @@ void obdo_refresh_inode(struct inode *dst, struct obdo *src, obd_flag valid)
                 i_size_write(dst, src->o_size);
        /* optimum IO size */
        if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits))
-               dst->i_blkbits = cfs_ffs(src->o_blksize) - 1;
+               dst->i_blkbits = ffs(src->o_blksize) - 1;
 
        if (dst->i_blkbits < CFS_PAGE_SHIFT)
                dst->i_blkbits = CFS_PAGE_SHIFT;
@@ -213,7 +213,7 @@ void obdo_to_inode(struct inode *dst, struct obdo *src, obd_flag valid)
 
         }
        if (valid & OBD_MD_FLBLKSZ)
-               dst->i_blkbits = cfs_ffs(src->o_blksize)-1;
+               dst->i_blkbits = ffs(src->o_blksize)-1;
         if (valid & OBD_MD_FLMODE)
                 dst->i_mode = (dst->i_mode & S_IFMT) | (src->o_mode & ~S_IFMT);
         if (valid & OBD_MD_FLUID)
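
The ffs() conversion above works because ffs() returns the 1-based index of
the least significant set bit, so for a power-of-two block size it yields
log2(size) + 1: ffs(4096) == 13, hence i_blkbits = 12, and 1 << 12 == 4096
recovers the size.  A one-line helper capturing the idiom (the helper name
is made up):

        #include <linux/bitops.h>

        /* power-of-two sizes only: 512 -> 9, 4096 -> 12, 65536 -> 16 */
        static inline int blksize_to_blkbits(unsigned int blksize)
        {
                return ffs(blksize) - 1;
        }
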
index 0c643b2..cd457f5 100644
@@ -65,8 +65,8 @@ struct llog_handle *llog_alloc_handle(void)
        if (loghandle == NULL)
                return ERR_PTR(-ENOMEM);
 
-       cfs_init_rwsem(&loghandle->lgh_lock);
-       cfs_spin_lock_init(&loghandle->lgh_hdr_lock);
+       init_rwsem(&loghandle->lgh_lock);
+       spin_lock_init(&loghandle->lgh_hdr_lock);
        CFS_INIT_LIST_HEAD(&loghandle->u.phd.phd_entry);
 
        return loghandle;
@@ -109,19 +109,19 @@ int llog_cancel_rec(const struct lu_env *env, struct llog_handle *loghandle,
                 RETURN(-EINVAL);
         }
 
-       cfs_spin_lock(&loghandle->lgh_hdr_lock);
+       spin_lock(&loghandle->lgh_hdr_lock);
        if (!ext2_clear_bit(index, llh->llh_bitmap)) {
-               cfs_spin_unlock(&loghandle->lgh_hdr_lock);
-                CDEBUG(D_RPCTRACE, "Catalog index %u already clear?\n", index);
-                RETURN(-ENOENT);
-        }
+               spin_unlock(&loghandle->lgh_hdr_lock);
+               CDEBUG(D_RPCTRACE, "Catalog index %u already clear?\n", index);
+               RETURN(-ENOENT);
+       }
 
-        llh->llh_count--;
+       llh->llh_count--;
 
-        if ((llh->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
-            (llh->llh_count == 1) &&
-            (loghandle->lgh_last_idx == (LLOG_BITMAP_BYTES * 8) - 1)) {
-               cfs_spin_unlock(&loghandle->lgh_hdr_lock);
+       if ((llh->llh_flags & LLOG_F_ZAP_WHEN_EMPTY) &&
+           (llh->llh_count == 1) &&
+           (loghandle->lgh_last_idx == (LLOG_BITMAP_BYTES * 8) - 1)) {
+               spin_unlock(&loghandle->lgh_hdr_lock);
                rc = llog_destroy(env, loghandle);
                if (rc < 0) {
                        CERROR("%s: can't destroy empty llog #"LPX64"#"LPX64
@@ -134,7 +134,7 @@ int llog_cancel_rec(const struct lu_env *env, struct llog_handle *loghandle,
                }
                RETURN(1);
        }
-       cfs_spin_unlock(&loghandle->lgh_hdr_lock);
+       spin_unlock(&loghandle->lgh_hdr_lock);
 
        rc = llog_write(env, loghandle, &llh->llh_hdr, NULL, 0, NULL, 0);
        if (rc < 0) {
@@ -148,10 +148,10 @@ int llog_cancel_rec(const struct lu_env *env, struct llog_handle *loghandle,
        }
        RETURN(0);
 out_err:
-       cfs_spin_lock(&loghandle->lgh_hdr_lock);
+       spin_lock(&loghandle->lgh_hdr_lock);
        ext2_set_bit(index, llh->llh_bitmap);
        llh->llh_count++;
-       cfs_spin_unlock(&loghandle->lgh_hdr_lock);
+       spin_unlock(&loghandle->lgh_hdr_lock);
        return rc;
 }
 EXPORT_SYMBOL(llog_cancel_rec);
@@ -411,7 +411,7 @@ static int llog_process_thread_daemonize(void *arg)
 
        lu_env_fini(&env);
 out:
-       cfs_complete(&lpi->lpi_completion);
+       complete(&lpi->lpi_completion);
        return rc;
 }
 #endif
@@ -440,7 +440,7 @@ int llog_process_or_fork(const struct lu_env *env,
                /* The new thread can't use parent env,
                 * init the new one in llog_process_thread_daemonize. */
                lpi->lpi_env = NULL;
-               cfs_init_completion(&lpi->lpi_completion);
+               init_completion(&lpi->lpi_completion);
                rc = cfs_create_thread(llog_process_thread_daemonize, lpi,
                                       CFS_DAEMON_FLAGS);
                if (rc < 0) {
@@ -449,7 +449,7 @@ int llog_process_or_fork(const struct lu_env *env,
                        OBD_FREE_PTR(lpi);
                        RETURN(rc);
                }
-               cfs_wait_for_completion(&lpi->lpi_completion);
+               wait_for_completion(&lpi->lpi_completion);
        } else {
                lpi->lpi_env = env;
                llog_process_thread(lpi);
@@ -864,17 +864,17 @@ int llog_write(const struct lu_env *env, struct llog_handle *loghandle,
                if (rc)
                        GOTO(out_trans, rc);
 
-               cfs_down_write(&loghandle->lgh_lock);
+               down_write(&loghandle->lgh_lock);
                rc = llog_write_rec(env, loghandle, rec, reccookie,
                                    cookiecount, buf, idx, th);
-               cfs_up_write(&loghandle->lgh_lock);
+               up_write(&loghandle->lgh_lock);
 out_trans:
                dt_trans_stop(env, dt, th);
        } else { /* lvfs compatibility */
-               cfs_down_write(&loghandle->lgh_lock);
+               down_write(&loghandle->lgh_lock);
                rc = llog_write_rec(env, loghandle, rec, reccookie,
                                    cookiecount, buf, idx, NULL);
-               cfs_up_write(&loghandle->lgh_lock);
+               up_write(&loghandle->lgh_lock);
        }
        RETURN(rc);
 }
index 47266b2..89dbc74 100644
@@ -105,15 +105,15 @@ static int llog_cat_new_log(const struct lu_env *env,
         if (index == 0)
                 index = 1;
 
-       cfs_spin_lock(&loghandle->lgh_hdr_lock);
+       spin_lock(&loghandle->lgh_hdr_lock);
        llh->llh_count++;
-        if (ext2_set_bit(index, llh->llh_bitmap)) {
-                CERROR("argh, index %u already set in log bitmap?\n",
-                       index);
-               cfs_spin_unlock(&loghandle->lgh_hdr_lock);
-                LBUG(); /* should never happen */
-        }
-       cfs_spin_unlock(&loghandle->lgh_hdr_lock);
+       if (ext2_set_bit(index, llh->llh_bitmap)) {
+               CERROR("argh, index %u already set in log bitmap?\n",
+                      index);
+               spin_unlock(&loghandle->lgh_hdr_lock);
+               LBUG(); /* should never happen */
+       }
+       spin_unlock(&loghandle->lgh_hdr_lock);
 
         cathandle->lgh_last_idx = index;
         llh->llh_tail.lrt_index = index;
@@ -159,7 +159,7 @@ int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *cathandle,
        if (cathandle == NULL)
                RETURN(-EBADF);
 
-       cfs_down_write(&cathandle->lgh_lock);
+       down_write(&cathandle->lgh_lock);
        cfs_list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
                                u.phd.phd_entry) {
                struct llog_logid *cgl = &loghandle->lgh_id;
@@ -173,11 +173,11 @@ int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *cathandle,
                                continue;
                        }
                        loghandle->u.phd.phd_cat_handle = cathandle;
-                       cfs_up_write(&cathandle->lgh_lock);
+                       up_write(&cathandle->lgh_lock);
                        GOTO(out, rc = 0);
                }
        }
-       cfs_up_write(&cathandle->lgh_lock);
+       up_write(&cathandle->lgh_lock);
 
        rc = llog_open(env, cathandle->lgh_ctxt, &loghandle, logid, NULL,
                       LLOG_OPEN_EXISTS);
@@ -194,9 +194,9 @@ int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *cathandle,
                GOTO(out, rc);
        }
 
-       cfs_down_write(&cathandle->lgh_lock);
+       down_write(&cathandle->lgh_lock);
        cfs_list_add(&loghandle->u.phd.phd_entry, &cathandle->u.chd.chd_head);
-       cfs_up_write(&cathandle->lgh_lock);
+       up_write(&cathandle->lgh_lock);
 
        loghandle->u.phd.phd_cat_handle = cathandle;
        loghandle->u.phd.phd_cookie.lgc_lgl = cathandle->lgh_id;
@@ -278,39 +278,39 @@ static struct llog_handle *llog_cat_current_log(struct llog_handle *cathandle,
         struct llog_handle *loghandle = NULL;
         ENTRY;
 
-        cfs_down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
+       down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
         loghandle = cathandle->u.chd.chd_current_log;
         if (loghandle) {
                struct llog_log_hdr *llh;
 
-               cfs_down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
+               down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
                llh = loghandle->lgh_hdr;
                if (llh == NULL ||
                    loghandle->lgh_last_idx < LLOG_BITMAP_SIZE(llh) - 1) {
-                        cfs_up_read(&cathandle->lgh_lock);
+                       up_read(&cathandle->lgh_lock);
                         RETURN(loghandle);
                 } else {
-                        cfs_up_write(&loghandle->lgh_lock);
+                       up_write(&loghandle->lgh_lock);
                 }
         }
-        cfs_up_read(&cathandle->lgh_lock);
+       up_read(&cathandle->lgh_lock);
 
        /* time to use next log */
 
        /* first, we have to make sure the state hasn't changed */
-       cfs_down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
+       down_write_nested(&cathandle->lgh_lock, LLOGH_CAT);
        loghandle = cathandle->u.chd.chd_current_log;
        if (loghandle) {
                struct llog_log_hdr *llh;
 
-               cfs_down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
+               down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
                llh = loghandle->lgh_hdr;
                LASSERT(llh);
                 if (loghandle->lgh_last_idx < LLOG_BITMAP_SIZE(llh) - 1) {
-                        cfs_up_write(&cathandle->lgh_lock);
+                       up_write(&cathandle->lgh_lock);
                         RETURN(loghandle);
                 } else {
-                        cfs_up_write(&loghandle->lgh_lock);
+                       up_write(&loghandle->lgh_lock);
                 }
         }
 
@@ -319,8 +319,8 @@ static struct llog_handle *llog_cat_current_log(struct llog_handle *cathandle,
        loghandle = cathandle->u.chd.chd_next_log;
        cathandle->u.chd.chd_current_log = loghandle;
        cathandle->u.chd.chd_next_log = NULL;
-       cfs_down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
-       cfs_up_write(&cathandle->lgh_lock);
+       down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
+       up_write(&cathandle->lgh_lock);
        LASSERT(loghandle);
        RETURN(loghandle);
 }
@@ -346,7 +346,7 @@ int llog_cat_add_rec(const struct lu_env *env, struct llog_handle *cathandle,
        if (!llog_exist(loghandle)) {
                rc = llog_cat_new_log(env, cathandle, loghandle, th);
                if (rc < 0) {
-                       cfs_up_write(&loghandle->lgh_lock);
+                       up_write(&loghandle->lgh_lock);
                        RETURN(rc);
                }
        }
@@ -354,7 +354,7 @@ int llog_cat_add_rec(const struct lu_env *env, struct llog_handle *cathandle,
        rc = llog_write_rec(env, loghandle, rec, reccookie, 1, buf, -1, th);
         if (rc < 0)
                 CERROR("llog_write_rec %d: lh=%p\n", rc, loghandle);
-        cfs_up_write(&loghandle->lgh_lock);
+       up_write(&loghandle->lgh_lock);
         if (rc == -ENOSPC) {
                /* try to use next log */
                loghandle = llog_cat_current_log(cathandle, th);
@@ -363,7 +363,7 @@ int llog_cat_add_rec(const struct lu_env *env, struct llog_handle *cathandle,
                if (!llog_exist(loghandle)) {
                        rc = llog_cat_new_log(env, cathandle, loghandle, th);
                        if (rc < 0) {
-                               cfs_up_write(&loghandle->lgh_lock);
+                               up_write(&loghandle->lgh_lock);
                                RETURN(rc);
                        }
                }
@@ -372,7 +372,7 @@ int llog_cat_add_rec(const struct lu_env *env, struct llog_handle *cathandle,
                                    -1, th);
                if (rc < 0)
                        CERROR("llog_write_rec %d: lh=%p\n", rc, loghandle);
-               cfs_up_write(&loghandle->lgh_lock);
+               up_write(&loghandle->lgh_lock);
        }
 
        RETURN(rc);
@@ -390,7 +390,7 @@ int llog_cat_declare_add_rec(const struct lu_env *env,
 
        if (cathandle->u.chd.chd_current_log == NULL) {
                /* declare new plain llog */
-               cfs_down_write(&cathandle->lgh_lock);
+               down_write(&cathandle->lgh_lock);
                if (cathandle->u.chd.chd_current_log == NULL) {
                        rc = llog_open(env, cathandle->lgh_ctxt, &loghandle,
                                       NULL, NULL, LLOG_OPEN_NEW);
@@ -400,10 +400,10 @@ int llog_cat_declare_add_rec(const struct lu_env *env,
                                                  &cathandle->u.chd.chd_head);
                        }
                }
-               cfs_up_write(&cathandle->lgh_lock);
+               up_write(&cathandle->lgh_lock);
        } else if (cathandle->u.chd.chd_next_log == NULL) {
                /* declare next plain llog */
-               cfs_down_write(&cathandle->lgh_lock);
+               down_write(&cathandle->lgh_lock);
                if (cathandle->u.chd.chd_next_log == NULL) {
                        rc = llog_open(env, cathandle->lgh_ctxt, &loghandle,
                                       NULL, NULL, LLOG_OPEN_NEW);
@@ -413,7 +413,7 @@ int llog_cat_declare_add_rec(const struct lu_env *env,
                                                  &cathandle->u.chd.chd_head);
                        }
                }
-               cfs_up_write(&cathandle->lgh_lock);
+               up_write(&cathandle->lgh_lock);
        }
        if (rc)
                GOTO(out, rc);
@@ -519,10 +519,10 @@ int llog_cat_cancel_records(const struct lu_env *env,
                lrc = llog_cancel_rec(env, loghandle, cookies->lgc_index);
                if (lrc == 1) {          /* log has been destroyed */
                        index = loghandle->u.phd.phd_cookie.lgc_index;
-                       cfs_down_write(&cathandle->lgh_lock);
+                       down_write(&cathandle->lgh_lock);
                        if (cathandle->u.chd.chd_current_log == loghandle)
                                cathandle->u.chd.chd_current_log = NULL;
-                       cfs_up_write(&cathandle->lgh_lock);
+                       up_write(&cathandle->lgh_lock);
                        llog_close(env, loghandle);
 
                        LASSERT(index);
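
The down_read_nested()/down_write_nested() calls kept through this rename
exist for lockdep: every llog_handle's lgh_lock is initialized at the same
init_rwsem() call site, so catalog and plain-log rwsems share one lock
class, and holding two locks of the same class would look like a deadlock
to lockdep without a subclass annotation (the role LLOGH_CAT/LLOGH_LOG play
above).  A minimal sketch with illustrative names:

        #include <linux/rwsem.h>

        enum { SUB_PARENT, SUB_CHILD };         /* lockdep subclasses */

        struct log_node {
                struct rw_semaphore lock;       /* all init_rwsem()ed at one site */
        };

        static void lock_pair(struct log_node *parent, struct log_node *child)
        {
                /* same lock class twice: tell lockdep the order is intended */
                down_write_nested(&parent->lock, SUB_PARENT);
                down_write_nested(&child->lock, SUB_CHILD);
                /* ... modify both objects ... */
                up_write(&child->lock);
                up_write(&parent->lock);
        }
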
index 5c91774..05330f8 100644
@@ -44,7 +44,7 @@ struct llog_process_info {
         void               *lpi_catdata;
         int                 lpi_rc;
         int                 lpi_flags;
-        cfs_completion_t    lpi_completion;
+       struct completion       lpi_completion;
        const struct lu_env     *lpi_env;
 
 };
index a20a077..5c1c714 100644
@@ -257,10 +257,10 @@ static int llog_remove_log(const struct lu_env *env, struct llog_handle *cat,
                CDEBUG(D_IOCTL, "cannot destroy log\n");
                GOTO(out, rc);
        }
-       cfs_down_write(&cat->lgh_lock);
+       down_write(&cat->lgh_lock);
        if (cat->u.chd.chd_current_log == log)
                cat->u.chd.chd_current_log = NULL;
-       cfs_up_write(&cat->lgh_lock);
+       up_write(&cat->lgh_lock);
        llog_cat_set_first_idx(cat, index);
        rc = llog_cancel_rec(env, cat, index);
 out:
@@ -448,7 +448,7 @@ int llog_catalog_list(struct obd_device *obd, int count,
         if (!idarray)
                 RETURN(-ENOMEM);
 
-        cfs_mutex_lock(&obd->obd_olg.olg_cat_processing);
+       mutex_lock(&obd->obd_olg.olg_cat_processing);
         rc = llog_get_cat_list(obd, name, 0, count, idarray);
         if (rc)
                 GOTO(out, rc);
@@ -469,7 +469,7 @@ int llog_catalog_list(struct obd_device *obd, int count,
         }
 out:
         /* release mutex */
-        cfs_mutex_unlock(&obd->obd_olg.olg_cat_processing);
+       mutex_unlock(&obd->obd_olg.olg_cat_processing);
 
         OBD_FREE_LARGE(idarray, size);
         RETURN(rc);
index e2fc8a3..fa75772 100644
@@ -328,14 +328,14 @@ static int llog_lvfs_write_rec(const struct lu_env *env,
         /* The caller should make sure only one process accesses the lgh_last_idx;
          * otherwise it might hit the assert. */
         LASSERT(index < LLOG_BITMAP_SIZE(llh));
-       cfs_spin_lock(&loghandle->lgh_hdr_lock);
+       spin_lock(&loghandle->lgh_hdr_lock);
        if (ext2_set_bit(index, llh->llh_bitmap)) {
                CERROR("argh, index %u already set in log bitmap?\n", index);
-               cfs_spin_unlock(&loghandle->lgh_hdr_lock);
+               spin_unlock(&loghandle->lgh_hdr_lock);
                LBUG(); /* should never happen */
        }
        llh->llh_count++;
-       cfs_spin_unlock(&loghandle->lgh_hdr_lock);
+       spin_unlock(&loghandle->lgh_hdr_lock);
         llh->llh_tail.lrt_index = index;
 
         rc = llog_lvfs_write_blob(obd, file, &llh->llh_hdr, NULL, 0);
index 53b3fe6..b210256 100644
@@ -79,21 +79,21 @@ int __llog_ctxt_put(const struct lu_env *env, struct llog_ctxt *ctxt)
         struct obd_device *obd;
         int rc = 0;
 
-        cfs_spin_lock(&olg->olg_lock);
-        if (!cfs_atomic_dec_and_test(&ctxt->loc_refcount)) {
-                cfs_spin_unlock(&olg->olg_lock);
-                return rc;
-        }
-        olg->olg_ctxts[ctxt->loc_idx] = NULL;
-        cfs_spin_unlock(&olg->olg_lock);
+       spin_lock(&olg->olg_lock);
+       if (!cfs_atomic_dec_and_test(&ctxt->loc_refcount)) {
+               spin_unlock(&olg->olg_lock);
+               return rc;
+       }
+       olg->olg_ctxts[ctxt->loc_idx] = NULL;
+       spin_unlock(&olg->olg_lock);
 
-        if (ctxt->loc_lcm)
-                lcm_put(ctxt->loc_lcm);
+       if (ctxt->loc_lcm)
+               lcm_put(ctxt->loc_lcm);
 
-        obd = ctxt->loc_obd;
-        cfs_spin_lock(&obd->obd_dev_lock);
-        /* sync with llog ctxt user thread */
-        cfs_spin_unlock(&obd->obd_dev_lock);
+       obd = ctxt->loc_obd;
+       spin_lock(&obd->obd_dev_lock);
+       /* sync with llog ctxt user thread */
+       spin_unlock(&obd->obd_dev_lock);
 
         /* obd->obd_starting is needed for cleanup in the error
          * case while the obd is starting up. */
@@ -171,7 +171,7 @@ int llog_setup(const struct lu_env *env, struct obd_device *obd,
         ctxt->loc_olg = olg;
         ctxt->loc_idx = index;
         ctxt->loc_logops = op;
-        cfs_mutex_init(&ctxt->loc_mutex);
+       mutex_init(&ctxt->loc_mutex);
         ctxt->loc_exp = class_export_get(disk_obd->obd_self_export);
         ctxt->loc_flags = LLOG_CTXT_FLAG_UNINITIALIZED;
 
index 80904c5..5470b5a 100644
@@ -434,15 +434,15 @@ static int llog_osd_write_rec(const struct lu_env *env,
        /* The caller should make sure only one process accesses the lgh_last_idx;
         * otherwise it might hit the assert. */
        LASSERT(index < LLOG_BITMAP_SIZE(llh));
-       cfs_spin_lock(&loghandle->lgh_hdr_lock);
+       spin_lock(&loghandle->lgh_hdr_lock);
        if (ext2_set_bit(index, llh->llh_bitmap)) {
                CERROR("%s: index %u already set in log bitmap\n",
                       o->do_lu.lo_dev->ld_obd->obd_name, index);
-               cfs_spin_unlock(&loghandle->lgh_hdr_lock);
+               spin_unlock(&loghandle->lgh_hdr_lock);
                LBUG(); /* should never happen */
        }
        llh->llh_count++;
-       cfs_spin_unlock(&loghandle->lgh_hdr_lock);
+       spin_unlock(&loghandle->lgh_hdr_lock);
        llh->llh_tail.lrt_index = index;
 
        lgi->lgi_off = 0;
@@ -777,9 +777,9 @@ static int llog_osd_open(const struct lu_env *env, struct llog_handle *handle,
        if (IS_ERR(ls))
                RETURN(PTR_ERR(ls));
 
-       cfs_mutex_lock(&ls->ls_los_mutex);
+       mutex_lock(&ls->ls_los_mutex);
        los = dt_los_find(ls, FID_SEQ_LLOG);
-       cfs_mutex_unlock(&ls->ls_los_mutex);
+       mutex_unlock(&ls->ls_los_mutex);
        LASSERT(los);
        ls_device_put(env, ls);
 
@@ -1107,9 +1107,9 @@ static int llog_osd_cleanup(const struct lu_env *env, struct llog_ctxt *ctxt)
        if (IS_ERR(ls))
                RETURN(PTR_ERR(ls));
 
-       cfs_mutex_lock(&ls->ls_los_mutex);
+       mutex_lock(&ls->ls_los_mutex);
        los = dt_los_find(ls, FID_SEQ_LLOG);
-       cfs_mutex_unlock(&ls->ls_los_mutex);
+       mutex_unlock(&ls->ls_los_mutex);
        if (los != NULL) {
                dt_los_put(los);
                local_oid_storage_fini(env, los);
index 3dd76e6..b2121dc 100644
@@ -36,7 +36,7 @@
 
 /* all initialized local storages on this node are linked into this list */
 static CFS_LIST_HEAD(ls_list_head);
-static CFS_DEFINE_MUTEX(ls_list_mutex);
+static DEFINE_MUTEX(ls_list_mutex);
 
 static int ls_object_init(const struct lu_env *env, struct lu_object *o,
                          const struct lu_object_conf *unused)
@@ -122,9 +122,9 @@ struct ls_device *ls_find_dev(struct dt_device *dev)
 {
        struct ls_device *ls;
 
-       cfs_mutex_lock(&ls_list_mutex);
+       mutex_lock(&ls_list_mutex);
        ls = __ls_find_dev(dev);
-       cfs_mutex_unlock(&ls_list_mutex);
+       mutex_unlock(&ls_list_mutex);
 
        return ls;
 }
@@ -145,7 +145,7 @@ struct ls_device *ls_device_get(struct dt_device *dev)
 
        ENTRY;
 
-       cfs_mutex_lock(&ls_list_mutex);
+       mutex_lock(&ls_list_mutex);
        ls = __ls_find_dev(dev);
        if (ls)
                GOTO(out_ls, ls);
@@ -157,7 +157,7 @@ struct ls_device *ls_device_get(struct dt_device *dev)
 
        cfs_atomic_set(&ls->ls_refcount, 1);
        CFS_INIT_LIST_HEAD(&ls->ls_los_list);
-       cfs_mutex_init(&ls->ls_los_mutex);
+       mutex_init(&ls->ls_los_mutex);
 
        ls->ls_osd = dev;
 
@@ -169,7 +169,7 @@ struct ls_device *ls_device_get(struct dt_device *dev)
        /* finally add ls to the list */
        cfs_list_add(&ls->ls_linkage, &ls_list_head);
 out_ls:
-       cfs_mutex_unlock(&ls_list_mutex);
+       mutex_unlock(&ls_list_mutex);
        RETURN(ls);
 }
 
@@ -179,7 +179,7 @@ void ls_device_put(const struct lu_env *env, struct ls_device *ls)
        if (!cfs_atomic_dec_and_test(&ls->ls_refcount))
                return;
 
-       cfs_mutex_lock(&ls_list_mutex);
+       mutex_lock(&ls_list_mutex);
        if (cfs_atomic_read(&ls->ls_refcount) == 0) {
                LASSERT(cfs_list_empty(&ls->ls_los_list));
                cfs_list_del(&ls->ls_linkage);
@@ -187,7 +187,7 @@ void ls_device_put(const struct lu_env *env, struct ls_device *ls)
                lu_device_fini(&ls->ls_top_dev.dd_lu_dev);
                OBD_FREE_PTR(ls);
        }
-       cfs_mutex_unlock(&ls_list_mutex);
+       mutex_unlock(&ls_list_mutex);
 }
 
 /**
@@ -206,11 +206,11 @@ int local_object_fid_generate(const struct lu_env *env,
         * the latest generated fid atomically with
         * object creation, see local_object_create() */
 
-       cfs_mutex_lock(&los->los_id_lock);
+       mutex_lock(&los->los_id_lock);
        fid->f_seq = los->los_seq;
        fid->f_oid = los->los_last_oid++;
        fid->f_ver = 0;
-       cfs_mutex_unlock(&los->los_id_lock);
+       mutex_unlock(&los->los_id_lock);
 
        return 0;
 }
@@ -277,7 +277,7 @@ int local_object_create(const struct lu_env *env,
        /* many threads can update this; serialize them here
         * to avoid the race where one thread takes the value
         * first but writes it last */
-       cfs_mutex_lock(&los->los_id_lock);
+       mutex_lock(&los->los_id_lock);
 
        /* update local oid number on disk so that
         * we know the last one used after reboot */
@@ -289,7 +289,7 @@ int local_object_create(const struct lu_env *env,
        dti->dti_lb.lb_len = sizeof(losd);
        rc = dt_record_write(env, los->los_obj, &dti->dti_lb, &dti->dti_off,
                             th);
-       cfs_mutex_unlock(&los->los_id_lock);
+       mutex_unlock(&los->los_id_lock);
 
        RETURN(rc);
 }
@@ -638,7 +638,7 @@ int local_oid_storage_init(const struct lu_env *env, struct dt_device *dev,
        if (IS_ERR(ls))
                RETURN(PTR_ERR(ls));
 
-       cfs_mutex_lock(&ls->ls_los_mutex);
+       mutex_lock(&ls->ls_los_mutex);
        *los = dt_los_find(ls, fid_seq(first_fid));
        if (*los != NULL)
                GOTO(out, rc = 0);
@@ -649,7 +649,7 @@ int local_oid_storage_init(const struct lu_env *env, struct dt_device *dev,
                GOTO(out, rc = -ENOMEM);
 
        cfs_atomic_set(&(*los)->los_refcount, 1);
-       cfs_mutex_init(&(*los)->los_id_lock);
+       mutex_init(&(*los)->los_id_lock);
        (*los)->los_dev = &ls->ls_top_dev;
        cfs_atomic_inc(&ls->ls_refcount);
        cfs_list_add(&(*los)->los_list, &ls->ls_los_list);
@@ -788,7 +788,7 @@ out_los:
                (*los)->los_obj = o;
        }
 out:
-       cfs_mutex_unlock(&ls->ls_los_mutex);
+       mutex_unlock(&ls->ls_los_mutex);
        ls_device_put(env, ls);
        return rc;
 }
@@ -806,14 +806,14 @@ void local_oid_storage_fini(const struct lu_env *env,
        LASSERT(los->los_dev);
        ls = dt2ls_dev(los->los_dev);
 
-       cfs_mutex_lock(&ls->ls_los_mutex);
+       mutex_lock(&ls->ls_los_mutex);
        if (cfs_atomic_read(&los->los_refcount) == 0) {
                if (los->los_obj)
                        lu_object_put_nocache(env, &los->los_obj->do_lu);
                cfs_list_del(&los->los_list);
                OBD_FREE_PTR(los);
        }
-       cfs_mutex_unlock(&ls->ls_los_mutex);
+       mutex_unlock(&ls->ls_los_mutex);
        ls_device_put(env, ls);
 }
 EXPORT_SYMBOL(local_oid_storage_fini);
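
Two mutex-conversion idioms appear in this file: DEFINE_MUTEX() for the
file-scope ls_list_mutex, initialized at compile time, and mutex_init() for
the per-device ls_los_mutex, which is embedded in a dynamically allocated
structure and must be initialized at runtime.  A side-by-side sketch with
illustrative types and names:

        #include <linux/mutex.h>
        #include <linux/slab.h>

        static DEFINE_MUTEX(dev_list_mutex);    /* static data: build-time init */

        struct dev_state {
                struct mutex lock;              /* embedded lock: run-time init */
        };

        static struct dev_state *dev_state_alloc(void)
        {
                struct dev_state *ds = kzalloc(sizeof(*ds), GFP_KERNEL);

                if (ds != NULL)
                        mutex_init(&ds->lock);
                return ds;
        }
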
index 177ccbe..4aee0bd 100644
@@ -45,7 +45,7 @@ struct ls_device {
        struct dt_device        *ls_osd;
        /* list of all local OID storages */
        cfs_list_t               ls_los_list;
-       cfs_mutex_t              ls_los_mutex;
+       struct mutex             ls_los_mutex;
 };
 
 static inline struct ls_device *dt2ls_dev(struct dt_device *d)
index 6adbe83..07da9c6 100644
@@ -116,9 +116,9 @@ static void job_free(struct job_stat *job)
        LASSERT(atomic_read(&job->js_refcount) == 0);
        LASSERT(job->js_jobstats);
 
-       cfs_write_lock(&job->js_jobstats->ojs_lock);
+       write_lock(&job->js_jobstats->ojs_lock);
        cfs_list_del_init(&job->js_list);
-       cfs_write_unlock(&job->js_jobstats->ojs_lock);
+       write_unlock(&job->js_jobstats->ojs_lock);
 
        lprocfs_free_stats(&job->js_stats);
        OBD_FREE_PTR(job);
@@ -252,9 +252,9 @@ int lprocfs_job_stats_log(struct obd_device *obd, char *jobid,
                 * "job2" was initialized in job_alloc() already. LU-2163 */
        } else {
                LASSERT(cfs_list_empty(&job->js_list));
-               cfs_write_lock(&stats->ojs_lock);
+               write_lock(&stats->ojs_lock);
                cfs_list_add_tail(&job->js_list, &stats->ojs_list);
-               cfs_write_unlock(&stats->ojs_lock);
+               write_unlock(&stats->ojs_lock);
        }
 
 found:
@@ -288,7 +288,7 @@ static void *lprocfs_jobstats_seq_start(struct seq_file *p, loff_t *pos)
        loff_t off = *pos;
        struct job_stat *job;
 
-       cfs_read_lock(&stats->ojs_lock);
+       read_lock(&stats->ojs_lock);
        if (off == 0)
                return SEQ_START_TOKEN;
        off--;
@@ -303,7 +303,7 @@ static void lprocfs_jobstats_seq_stop(struct seq_file *p, void *v)
 {
        struct obd_job_stats *stats = p->private;
 
-       cfs_read_unlock(&stats->ojs_lock);
+       read_unlock(&stats->ojs_lock);
 }
 
 static void *lprocfs_jobstats_seq_next(struct seq_file *p, void *v, loff_t *pos)
@@ -516,7 +516,7 @@ int lprocfs_job_stats_init(struct obd_device *obd, int cntr_num,
                RETURN(-ENOMEM);
 
        CFS_INIT_LIST_HEAD(&stats->ojs_list);
-       cfs_rwlock_init(&stats->ojs_lock);
+       rwlock_init(&stats->ojs_lock);
        stats->ojs_cntr_num = cntr_num;
        stats->ojs_cntr_init_fn = init_fn;
        stats->ojs_cleanup_interval = 600; /* 10 mins by default */
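
ojs_lock above is a read-mostly rwlock: the seq_file iterators hold
read_lock() across the walk of ojs_list (taken in
lprocfs_jobstats_seq_start() and dropped in lprocfs_jobstats_seq_stop()),
while only list insertion and removal take write_lock().  A compact sketch
of that reader/writer split, with a hypothetical stat_entry type:

        #include <linux/spinlock.h>
        #include <linux/list.h>
        #include <linux/types.h>

        static DEFINE_RWLOCK(stats_lock);
        static LIST_HEAD(stats_list);

        struct stat_entry {
                struct list_head link;
                u64 count;
        };

        static u64 stats_sum(void)              /* readers may run in parallel */
        {
                struct stat_entry *s;
                u64 sum = 0;

                read_lock(&stats_lock);
                list_for_each_entry(s, &stats_list, link)
                        sum += s->count;
                read_unlock(&stats_lock);
                return sum;
        }

        static void stats_add(struct stat_entry *s)  /* writers are exclusive */
        {
                write_lock(&stats_lock);
                list_add_tail(&s->link, &stats_list);
                write_unlock(&stats_lock);
        }
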
index de523b9..ba166e3 100644
@@ -60,7 +60,7 @@ CFS_MODULE_PARM(lprocfs_no_percpu_stats, "i", int, 0644,
 #define MAX_STRING_SIZE 128
 
 /* for bug 10866, global variable */
-CFS_DECLARE_RWSEM(_lprocfs_lock);
+DECLARE_RWSEM(_lprocfs_lock);
 EXPORT_SYMBOL(_lprocfs_lock);
 
 int lprocfs_single_release(struct inode *inode, struct file *file)
@@ -1057,7 +1057,7 @@ int lprocfs_rd_import(char *page, char **start, off_t off, int count,
                       "]\n"
                       "    connection:\n"
                       "       failover_nids: [");
-        cfs_spin_lock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
         j = 0;
         cfs_list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
                 i += snprintf(page + i, count - i, "%s%s", j ? ", " : "",
@@ -1075,7 +1075,7 @@ int lprocfs_rd_import(char *page, char **start, off_t off, int count,
                      imp->imp_conn_cnt,
                      imp->imp_generation,
                      cfs_atomic_read(&imp->imp_inval_count));
-       cfs_spin_unlock(&imp->imp_lock);
+       spin_unlock(&imp->imp_lock);
 
         lprocfs_stats_collect(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR, &ret);
         if (ret.lc_count != 0) {
@@ -1405,7 +1405,7 @@ struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num,
        stats->ls_num = num;
        stats->ls_biggest_alloc_num = 1;
        stats->ls_flags = flags;
-       cfs_spin_lock_init(&stats->ls_lock);
+       spin_lock_init(&stats->ls_lock);
 
        percpusize = offsetof(struct lprocfs_percpu, lp_cntr[num]);
        if (num_entry > 1)
@@ -1985,9 +1985,9 @@ static int lprocfs_nid_stats_clear_write_cb(void *obj, void *data)
         CDEBUG(D_INFO,"refcnt %d\n", cfs_atomic_read(&stat->nid_exp_ref_count));
         if (cfs_atomic_read(&stat->nid_exp_ref_count) == 1) {
                 /* object has only hash references. */
-                cfs_spin_lock(&stat->nid_obd->obd_nid_lock);
-                cfs_list_move(&stat->nid_list, data);
-                cfs_spin_unlock(&stat->nid_obd->obd_nid_lock);
+               spin_lock(&stat->nid_obd->obd_nid_lock);
+               cfs_list_move(&stat->nid_list, data);
+               spin_unlock(&stat->nid_obd->obd_nid_lock);
                 RETURN(1);
         }
         /* we have a reference to the object - only clear its data */
@@ -2111,9 +2111,9 @@ int lprocfs_exp_setup(struct obd_export *exp, lnet_nid_t *nid, int *newnid)
         exp->exp_nid_stats = new_stat;
         *newnid = 1;
         /* protect concurrent adds to the list; no locking is needed on destroy */
-        cfs_spin_lock(&obd->obd_nid_lock);
-        cfs_list_add(&new_stat->nid_list, &obd->obd_nid_stats);
-        cfs_spin_unlock(&obd->obd_nid_lock);
+       spin_lock(&obd->obd_nid_lock);
+       cfs_list_add(&new_stat->nid_list, &obd->obd_nid_stats);
+       spin_unlock(&obd->obd_nid_lock);
 
         RETURN(rc);
 
@@ -2395,12 +2395,12 @@ EXPORT_SYMBOL(lprocfs_obd_seq_create);
 
 void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value)
 {
-        if (value >= OBD_HIST_MAX)
-                value = OBD_HIST_MAX - 1;
+       if (value >= OBD_HIST_MAX)
+               value = OBD_HIST_MAX - 1;
 
-        cfs_spin_lock(&oh->oh_lock);
-        oh->oh_buckets[value]++;
-        cfs_spin_unlock(&oh->oh_lock);
+       spin_lock(&oh->oh_lock);
+       oh->oh_buckets[value]++;
+       spin_unlock(&oh->oh_lock);
 }
 EXPORT_SYMBOL(lprocfs_oh_tally);
 
@@ -2428,9 +2428,9 @@ EXPORT_SYMBOL(lprocfs_oh_sum);
 
 void lprocfs_oh_clear(struct obd_histogram *oh)
 {
-        cfs_spin_lock(&oh->oh_lock);
-        memset(oh->oh_buckets, 0, sizeof(oh->oh_buckets));
-        cfs_spin_unlock(&oh->oh_lock);
+       spin_lock(&oh->oh_lock);
+       memset(oh->oh_buckets, 0, sizeof(oh->oh_buckets));
+       spin_unlock(&oh->oh_lock);
 }
 EXPORT_SYMBOL(lprocfs_oh_clear);
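
set_bit()/test_bit(), as used for OBD_ZOMBIE_STOP earlier and for
LU_OBJECT_HEARD_BANSHEE in the next file, are the kernel's atomic bitops on
an unsigned long word; note that they take a bit number, not a mask.  A
minimal sketch with made-up flag names:

        #include <linux/bitops.h>
        #include <linux/types.h>

        enum { FLAG_STOP, FLAG_DIRTY };  /* bit numbers 0 and 1, not masks */

        static unsigned long flags;

        static void request_stop(void)
        {
                set_bit(FLAG_STOP, &flags);     /* atomic read-modify-write */
        }

        static bool stop_requested(void)
        {
                return test_bit(FLAG_STOP, &flags) != 0;
        }
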
 
index 74c1cc9..8c7d253 100644
@@ -161,7 +161,7 @@ EXPORT_SYMBOL(lu_object_put);
  */
 void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o)
 {
-       cfs_set_bit(LU_OBJECT_HEARD_BANSHEE,
+       set_bit(LU_OBJECT_HEARD_BANSHEE,
                    &o->lo_header->loh_flags);
        return lu_object_put(env, o);
 }
@@ -750,7 +750,7 @@ EXPORT_SYMBOL(lu_types_stop);
  * Global list of all sites on this node
  */
 static CFS_LIST_HEAD(lu_sites);
-static CFS_DEFINE_MUTEX(lu_sites_guard);
+static DEFINE_MUTEX(lu_sites_guard);
 
 /**
  * Global environment used by site shrinker.
@@ -921,18 +921,18 @@ cfs_hash_ops_t lu_site_hash_ops = {
 
 void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
 {
-       cfs_spin_lock(&s->ls_ld_lock);
+       spin_lock(&s->ls_ld_lock);
        if (cfs_list_empty(&d->ld_linkage))
                cfs_list_add(&d->ld_linkage, &s->ls_ld_linkage);
-       cfs_spin_unlock(&s->ls_ld_lock);
+       spin_unlock(&s->ls_ld_lock);
 }
 EXPORT_SYMBOL(lu_dev_add_linkage);
 
 void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d)
 {
-       cfs_spin_lock(&s->ls_ld_lock);
+       spin_lock(&s->ls_ld_lock);
        cfs_list_del_init(&d->ld_linkage);
-       cfs_spin_unlock(&s->ls_ld_lock);
+       spin_unlock(&s->ls_ld_lock);
 }
 EXPORT_SYMBOL(lu_dev_del_linkage);
 
@@ -1012,11 +1012,11 @@ int lu_site_init(struct lu_site *s, struct lu_device *top)
         lu_ref_add(&top->ld_reference, "site-top", s);
 
         CFS_INIT_LIST_HEAD(&s->ls_ld_linkage);
-        cfs_spin_lock_init(&s->ls_ld_lock);
+       spin_lock_init(&s->ls_ld_lock);
 
        lu_dev_add_linkage(s, top);
 
-        RETURN(0);
+       RETURN(0);
 }
 EXPORT_SYMBOL(lu_site_init);
 
@@ -1025,9 +1025,9 @@ EXPORT_SYMBOL(lu_site_init);
  */
 void lu_site_fini(struct lu_site *s)
 {
-        cfs_mutex_lock(&lu_sites_guard);
+       mutex_lock(&lu_sites_guard);
         cfs_list_del_init(&s->ls_linkage);
-        cfs_mutex_unlock(&lu_sites_guard);
+       mutex_unlock(&lu_sites_guard);
 
         if (s->ls_obj_hash != NULL) {
                 cfs_hash_putref(s->ls_obj_hash);
@@ -1052,11 +1052,11 @@ EXPORT_SYMBOL(lu_site_fini);
 int lu_site_init_finish(struct lu_site *s)
 {
         int result;
-        cfs_mutex_lock(&lu_sites_guard);
+       mutex_lock(&lu_sites_guard);
         result = lu_context_refill(&lu_shrink_env.le_ctx);
         if (result == 0)
                 cfs_list_add(&s->ls_linkage, &lu_sites);
-        cfs_mutex_unlock(&lu_sites_guard);
+       mutex_unlock(&lu_sites_guard);
         return result;
 }
 EXPORT_SYMBOL(lu_site_init_finish);
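
lu_sites_guard is now a plain kernel mutex; every traversal of the global lu_sites list takes it, as in this hedged sketch (count_sites() is a hypothetical helper, not part of the patch):

static int count_sites(void)
{
        struct lu_site *s;
        int n = 0;

        mutex_lock(&lu_sites_guard);            /* was cfs_mutex_lock() */
        cfs_list_for_each_entry(s, &lu_sites, ls_linkage)
                n++;
        mutex_unlock(&lu_sites_guard);          /* was cfs_mutex_unlock() */

        return n;
}
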
@@ -1293,7 +1293,7 @@ int lu_context_key_register(struct lu_context_key *key)
         LASSERT(key->lct_owner != NULL);
 
         result = -ENFILE;
-        cfs_spin_lock(&lu_keys_guard);
+       spin_lock(&lu_keys_guard);
         for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
                 if (lu_keys[i] == NULL) {
                         key->lct_index = i;
@@ -1305,8 +1305,8 @@ int lu_context_key_register(struct lu_context_key *key)
                         break;
                 }
         }
-        cfs_spin_unlock(&lu_keys_guard);
-        return result;
+       spin_unlock(&lu_keys_guard);
+       return result;
 }
 EXPORT_SYMBOL(lu_context_key_register);
 
@@ -1338,23 +1338,23 @@ static void key_fini(struct lu_context *ctx, int index)
  */
 void lu_context_key_degister(struct lu_context_key *key)
 {
-        LASSERT(cfs_atomic_read(&key->lct_used) >= 1);
-        LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
+       LASSERT(cfs_atomic_read(&key->lct_used) >= 1);
+       LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
 
-        lu_context_key_quiesce(key);
+       lu_context_key_quiesce(key);
 
-        ++key_set_version;
-        cfs_spin_lock(&lu_keys_guard);
-        key_fini(&lu_shrink_env.le_ctx, key->lct_index);
-        if (lu_keys[key->lct_index]) {
-                lu_keys[key->lct_index] = NULL;
-                lu_ref_fini(&key->lct_reference);
-        }
-        cfs_spin_unlock(&lu_keys_guard);
+       ++key_set_version;
+       spin_lock(&lu_keys_guard);
+       key_fini(&lu_shrink_env.le_ctx, key->lct_index);
+       if (lu_keys[key->lct_index]) {
+               lu_keys[key->lct_index] = NULL;
+               lu_ref_fini(&key->lct_reference);
+       }
+       spin_unlock(&lu_keys_guard);
 
-        LASSERTF(cfs_atomic_read(&key->lct_used) == 1,
-                 "key has instances: %d\n",
-                 cfs_atomic_read(&key->lct_used));
+       LASSERTF(cfs_atomic_read(&key->lct_used) == 1,
+                "key has instances: %d\n",
+                cfs_atomic_read(&key->lct_used));
 }
 EXPORT_SYMBOL(lu_context_key_degister);
 
@@ -1476,13 +1476,13 @@ void lu_context_key_quiesce(struct lu_context_key *key)
                 /*
                  * XXX memory barrier has to go here.
                  */
-                cfs_spin_lock(&lu_keys_guard);
-                cfs_list_for_each_entry(ctx, &lu_context_remembered,
-                                        lc_remember)
-                        key_fini(ctx, key->lct_index);
-                cfs_spin_unlock(&lu_keys_guard);
-                ++key_set_version;
-        }
+               spin_lock(&lu_keys_guard);
+               cfs_list_for_each_entry(ctx, &lu_context_remembered,
+                                       lc_remember)
+                       key_fini(ctx, key->lct_index);
+               spin_unlock(&lu_keys_guard);
+               ++key_set_version;
+       }
 }
 EXPORT_SYMBOL(lu_context_key_quiesce);
 
@@ -1567,13 +1567,13 @@ int lu_context_init(struct lu_context *ctx, __u32 tags)
 {
        int     rc;
 
-        memset(ctx, 0, sizeof *ctx);
-        ctx->lc_state = LCS_INITIALIZED;
-        ctx->lc_tags = tags;
-        if (tags & LCT_REMEMBER) {
-                cfs_spin_lock(&lu_keys_guard);
-                cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
-                cfs_spin_unlock(&lu_keys_guard);
+       memset(ctx, 0, sizeof *ctx);
+       ctx->lc_state = LCS_INITIALIZED;
+       ctx->lc_tags = tags;
+       if (tags & LCT_REMEMBER) {
+               spin_lock(&lu_keys_guard);
+               cfs_list_add(&ctx->lc_remember, &lu_context_remembered);
+               spin_unlock(&lu_keys_guard);
        } else {
                CFS_INIT_LIST_HEAD(&ctx->lc_remember);
        }
@@ -1599,10 +1599,10 @@ void lu_context_fini(struct lu_context *ctx)
                keys_fini(ctx);
 
        } else { /* could race with key degister */
-               cfs_spin_lock(&lu_keys_guard);
+               spin_lock(&lu_keys_guard);
                keys_fini(ctx);
                cfs_list_del_init(&ctx->lc_remember);
-               cfs_spin_unlock(&lu_keys_guard);
+               spin_unlock(&lu_keys_guard);
        }
 }
 EXPORT_SYMBOL(lu_context_fini);
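
A hedged sketch of the remembered-context lifecycle these two hunks cover: contexts tagged LCT_REMEMBER are linked on lu_context_remembered under lu_keys_guard at init and unlinked the same way at fini. The tag combination below is illustrative only:

static int demo_ctx_cycle(void)
{
        struct lu_context ctx;
        int rc;

        rc = lu_context_init(&ctx, LCT_MD_THREAD | LCT_REMEMBER);
        if (rc == 0)
                lu_context_fini(&ctx);  /* keys_fini under lu_keys_guard */
        return rc;
}
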
@@ -1665,37 +1665,37 @@ __u32 lu_session_tags_default = 0;
 
 void lu_context_tags_update(__u32 tags)
 {
-        cfs_spin_lock(&lu_keys_guard);
-        lu_context_tags_default |= tags;
-        key_set_version ++;
-        cfs_spin_unlock(&lu_keys_guard);
+       spin_lock(&lu_keys_guard);
+       lu_context_tags_default |= tags;
+       key_set_version++;
+       spin_unlock(&lu_keys_guard);
 }
 EXPORT_SYMBOL(lu_context_tags_update);
 
 void lu_context_tags_clear(__u32 tags)
 {
-        cfs_spin_lock(&lu_keys_guard);
-        lu_context_tags_default &= ~tags;
-        key_set_version ++;
-        cfs_spin_unlock(&lu_keys_guard);
+       spin_lock(&lu_keys_guard);
+       lu_context_tags_default &= ~tags;
+       key_set_version++;
+       spin_unlock(&lu_keys_guard);
 }
 EXPORT_SYMBOL(lu_context_tags_clear);
 
 void lu_session_tags_update(__u32 tags)
 {
-        cfs_spin_lock(&lu_keys_guard);
-        lu_session_tags_default |= tags;
-        key_set_version ++;
-        cfs_spin_unlock(&lu_keys_guard);
+       spin_lock(&lu_keys_guard);
+       lu_session_tags_default |= tags;
+       key_set_version++;
+       spin_unlock(&lu_keys_guard);
 }
 EXPORT_SYMBOL(lu_session_tags_update);
 
 void lu_session_tags_clear(__u32 tags)
 {
-        cfs_spin_lock(&lu_keys_guard);
-        lu_session_tags_default &= ~tags;
-        key_set_version ++;
-        cfs_spin_unlock(&lu_keys_guard);
+       spin_lock(&lu_keys_guard);
+       lu_session_tags_default &= ~tags;
+       key_set_version++;
+       spin_unlock(&lu_keys_guard);
 }
 EXPORT_SYMBOL(lu_session_tags_clear);
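
The four helpers above share one pattern: mutate the default tag mask under lu_keys_guard and bump key_set_version so live contexts notice and refill their keys. A combined sketch (tags_flip() is hypothetical):

static void tags_flip(__u32 set, __u32 clear)
{
        spin_lock(&lu_keys_guard);
        lu_context_tags_default |= set;
        lu_context_tags_default &= ~clear;
        key_set_version++;              /* invalidates cached key sets */
        spin_unlock(&lu_keys_guard);
}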
 
@@ -1844,7 +1844,7 @@ static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
 
        CDEBUG(D_INODE, "Shrink %d objects\n", remain);
 
-        cfs_mutex_lock(&lu_sites_guard);
+       mutex_lock(&lu_sites_guard);
         cfs_list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
                 if (shrink_param(sc, nr_to_scan) != 0) {
                         remain = lu_site_purge(&lu_shrink_env, s, remain);
@@ -1862,7 +1862,7 @@ static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
                         break;
         }
         cfs_list_splice(&splice, lu_sites.prev);
-        cfs_mutex_unlock(&lu_sites_guard);
+       mutex_unlock(&lu_sites_guard);
 
         cached = (cached / 100) * sysctl_vfs_cache_pressure;
         if (shrink_param(sc, nr_to_scan) == 0)
@@ -1958,9 +1958,9 @@ int lu_global_init(void)
          * conservatively. This should not be too bad, because this
          * environment is global.
          */
-        cfs_mutex_lock(&lu_sites_guard);
+       mutex_lock(&lu_sites_guard);
         result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
-        cfs_mutex_unlock(&lu_sites_guard);
+       mutex_unlock(&lu_sites_guard);
         if (result != 0)
                 return result;
 
@@ -2014,9 +2014,9 @@ void lu_global_fini(void)
          * Tear shrinker environment down _after_ de-registering
          * lu_global_key, because the latter has a value in the former.
          */
-        cfs_mutex_lock(&lu_sites_guard);
+       mutex_lock(&lu_sites_guard);
         lu_env_fini(&lu_shrink_env);
-        cfs_mutex_unlock(&lu_sites_guard);
+       mutex_unlock(&lu_sites_guard);
 
         lu_ref_global_fini();
 }
index 73f8cc5..95f51e7 100644
  * Asserts a condition for a given lu_ref. Must be called with
  * lu_ref::lf_guard held.
  */
-#define REFASSERT(ref, expr)  do {                      \
-       struct lu_ref *__tmp = (ref);                   \
-                                                       \
-       if (unlikely(!(expr))) {                        \
-               lu_ref_print(__tmp);                    \
-               cfs_spin_unlock(&__tmp->lf_guard);      \
-               lu_ref_print_all();                     \
-               LASSERT(0);                             \
-               cfs_spin_lock(&__tmp->lf_guard);        \
-       }                                               \
+#define REFASSERT(ref, expr) do {                                      \
+       struct lu_ref *__tmp = (ref);                                   \
+                                                                       \
+       if (unlikely(!(expr))) {                                        \
+               lu_ref_print(__tmp);                                    \
+               spin_unlock(&__tmp->lf_guard);                          \
+               lu_ref_print_all();                                     \
+               LASSERT(0);                                             \
+               spin_lock(&__tmp->lf_guard);                            \
+       }                                                               \
 } while (0)
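
REFASSERT expects lf_guard to already be held; on failure it drops the lock so lu_ref_print_all() can take the other guards, then re-acquires it (which only matters if LASSERT is compiled out). A hedged usage sketch (check_ref() is hypothetical):

static void check_ref(struct lu_ref *ref)
{
        spin_lock(&ref->lf_guard);
        REFASSERT(ref, ref->lf_refs == 0);      /* caller holds lf_guard */
        spin_unlock(&ref->lf_guard);
}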
 
 struct lu_ref_link {
@@ -95,7 +95,7 @@ static struct lu_kmem_descr lu_ref_caches[] = {
  * Protected by lu_ref_refs_guard.
  */
 static CFS_LIST_HEAD(lu_ref_refs);
-static cfs_spinlock_t lu_ref_refs_guard;
+static spinlock_t lu_ref_refs_guard;
 static struct lu_ref lu_ref_marker = {
        .lf_guard   = DEFINE_SPINLOCK(lu_ref_marker.lf_guard),
         .lf_list    = CFS_LIST_HEAD_INIT(lu_ref_marker.lf_list),
@@ -121,41 +121,41 @@ static int lu_ref_is_marker(const struct lu_ref *ref)
 
 void lu_ref_print_all(void)
 {
-        struct lu_ref *ref;
-
-        cfs_spin_lock(&lu_ref_refs_guard);
-        cfs_list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
-                if (lu_ref_is_marker(ref))
-                        continue;
-
-                cfs_spin_lock(&ref->lf_guard);
-                lu_ref_print(ref);
-                cfs_spin_unlock(&ref->lf_guard);
-        }
-        cfs_spin_unlock(&lu_ref_refs_guard);
+       struct lu_ref *ref;
+
+       spin_lock(&lu_ref_refs_guard);
+       cfs_list_for_each_entry(ref, &lu_ref_refs, lf_linkage) {
+               if (lu_ref_is_marker(ref))
+                       continue;
+
+               spin_lock(&ref->lf_guard);
+               lu_ref_print(ref);
+               spin_unlock(&ref->lf_guard);
+       }
+       spin_unlock(&lu_ref_refs_guard);
 }
 EXPORT_SYMBOL(lu_ref_print_all);
 
 void lu_ref_init_loc(struct lu_ref *ref, const char *func, const int line)
 {
-        ref->lf_refs = 0;
-        ref->lf_func = func;
-        ref->lf_line = line;
-        cfs_spin_lock_init(&ref->lf_guard);
-        CFS_INIT_LIST_HEAD(&ref->lf_list);
-        cfs_spin_lock(&lu_ref_refs_guard);
-        cfs_list_add(&ref->lf_linkage, &lu_ref_refs);
-        cfs_spin_unlock(&lu_ref_refs_guard);
+       ref->lf_refs = 0;
+       ref->lf_func = func;
+       ref->lf_line = line;
+       spin_lock_init(&ref->lf_guard);
+       CFS_INIT_LIST_HEAD(&ref->lf_list);
+       spin_lock(&lu_ref_refs_guard);
+       cfs_list_add(&ref->lf_linkage, &lu_ref_refs);
+       spin_unlock(&lu_ref_refs_guard);
 }
 EXPORT_SYMBOL(lu_ref_init_loc);
 
 void lu_ref_fini(struct lu_ref *ref)
 {
-        REFASSERT(ref, cfs_list_empty(&ref->lf_list));
-        REFASSERT(ref, ref->lf_refs == 0);
-        cfs_spin_lock(&lu_ref_refs_guard);
-        cfs_list_del_init(&ref->lf_linkage);
-        cfs_spin_unlock(&lu_ref_refs_guard);
+       REFASSERT(ref, cfs_list_empty(&ref->lf_list));
+       REFASSERT(ref, ref->lf_refs == 0);
+       spin_lock(&lu_ref_refs_guard);
+       cfs_list_del_init(&ref->lf_linkage);
+       spin_unlock(&lu_ref_refs_guard);
 }
 EXPORT_SYMBOL(lu_ref_fini);
 
@@ -173,21 +173,21 @@ static struct lu_ref_link *lu_ref_add_context(struct lu_ref *ref,
                         link->ll_ref    = ref;
                         link->ll_scope  = scope;
                         link->ll_source = source;
-                        cfs_spin_lock(&ref->lf_guard);
-                        cfs_list_add_tail(&link->ll_linkage, &ref->lf_list);
-                        ref->lf_refs++;
-                        cfs_spin_unlock(&ref->lf_guard);
-                }
-        }
-
-        if (link == NULL) {
-                cfs_spin_lock(&ref->lf_guard);
-                ref->lf_failed++;
-                cfs_spin_unlock(&ref->lf_guard);
-                link = ERR_PTR(-ENOMEM);
-        }
-
-        return link;
+                       spin_lock(&ref->lf_guard);
+                       cfs_list_add_tail(&link->ll_linkage, &ref->lf_list);
+                       ref->lf_refs++;
+                       spin_unlock(&ref->lf_guard);
+               }
+       }
+
+       if (link == NULL) {
+               spin_lock(&ref->lf_guard);
+               ref->lf_failed++;
+               spin_unlock(&ref->lf_guard);
+               link = ERR_PTR(-ENOMEM);
+       }
+
+       return link;
 }
 
 struct lu_ref_link *lu_ref_add(struct lu_ref *ref, const char *scope,
@@ -245,56 +245,56 @@ static struct lu_ref_link *lu_ref_find(struct lu_ref *ref, const char *scope,
 
 void lu_ref_del(struct lu_ref *ref, const char *scope, const void *source)
 {
-        struct lu_ref_link *link;
-
-        cfs_spin_lock(&ref->lf_guard);
-        link = lu_ref_find(ref, scope, source);
-        if (link != NULL) {
-                cfs_list_del(&link->ll_linkage);
-                ref->lf_refs--;
-                cfs_spin_unlock(&ref->lf_guard);
-                OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
-        } else {
-                REFASSERT(ref, ref->lf_failed > 0);
-                ref->lf_failed--;
-                cfs_spin_unlock(&ref->lf_guard);
-        }
+       struct lu_ref_link *link;
+
+       spin_lock(&ref->lf_guard);
+       link = lu_ref_find(ref, scope, source);
+       if (link != NULL) {
+               cfs_list_del(&link->ll_linkage);
+               ref->lf_refs--;
+               spin_unlock(&ref->lf_guard);
+               OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
+       } else {
+               REFASSERT(ref, ref->lf_failed > 0);
+               ref->lf_failed--;
+               spin_unlock(&ref->lf_guard);
+       }
 }
 EXPORT_SYMBOL(lu_ref_del);
 
 void lu_ref_set_at(struct lu_ref *ref, struct lu_ref_link *link,
-                   const char *scope,
-                   const void *source0, const void *source1)
+                  const char *scope,
+                  const void *source0, const void *source1)
 {
-        cfs_spin_lock(&ref->lf_guard);
-        if (link != ERR_PTR(-ENOMEM)) {
-                REFASSERT(ref, link->ll_ref == ref);
-                REFASSERT(ref, lu_ref_link_eq(link, scope, source0));
-                link->ll_source = source1;
-        } else {
-                REFASSERT(ref, ref->lf_failed > 0);
-        }
-        cfs_spin_unlock(&ref->lf_guard);
+       spin_lock(&ref->lf_guard);
+       if (link != ERR_PTR(-ENOMEM)) {
+               REFASSERT(ref, link->ll_ref == ref);
+               REFASSERT(ref, lu_ref_link_eq(link, scope, source0));
+               link->ll_source = source1;
+       } else {
+               REFASSERT(ref, ref->lf_failed > 0);
+       }
+       spin_unlock(&ref->lf_guard);
 }
 EXPORT_SYMBOL(lu_ref_set_at);
 
 void lu_ref_del_at(struct lu_ref *ref, struct lu_ref_link *link,
-                   const char *scope, const void *source)
+                  const char *scope, const void *source)
 {
-        if (link != ERR_PTR(-ENOMEM)) {
-                cfs_spin_lock(&ref->lf_guard);
-                REFASSERT(ref, link->ll_ref == ref);
-                REFASSERT(ref, lu_ref_link_eq(link, scope, source));
-                cfs_list_del(&link->ll_linkage);
-                ref->lf_refs--;
-                cfs_spin_unlock(&ref->lf_guard);
-                OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
-        } else {
-                cfs_spin_lock(&ref->lf_guard);
-                REFASSERT(ref, ref->lf_failed > 0);
-                ref->lf_failed--;
-                cfs_spin_unlock(&ref->lf_guard);
-        }
+       if (link != ERR_PTR(-ENOMEM)) {
+               spin_lock(&ref->lf_guard);
+               REFASSERT(ref, link->ll_ref == ref);
+               REFASSERT(ref, lu_ref_link_eq(link, scope, source));
+               cfs_list_del(&link->ll_linkage);
+               ref->lf_refs--;
+               spin_unlock(&ref->lf_guard);
+               OBD_SLAB_FREE(link, lu_ref_link_kmem, sizeof(*link));
+       } else {
+               spin_lock(&ref->lf_guard);
+               REFASSERT(ref, ref->lf_failed > 0);
+               ref->lf_failed--;
+               spin_unlock(&ref->lf_guard);
+       }
 }
 EXPORT_SYMBOL(lu_ref_del_at);
 
@@ -302,14 +302,14 @@ EXPORT_SYMBOL(lu_ref_del_at);
 
 static void *lu_ref_seq_start(struct seq_file *seq, loff_t *pos)
 {
-        struct lu_ref *ref = seq->private;
+       struct lu_ref *ref = seq->private;
 
-        cfs_spin_lock(&lu_ref_refs_guard);
-        if (cfs_list_empty(&ref->lf_linkage))
-                ref = NULL;
-        cfs_spin_unlock(&lu_ref_refs_guard);
+       spin_lock(&lu_ref_refs_guard);
+       if (cfs_list_empty(&ref->lf_linkage))
+               ref = NULL;
+       spin_unlock(&lu_ref_refs_guard);
 
-        return ref;
+       return ref;
 }
 
 static void *lu_ref_seq_next(struct seq_file *seq, void *p, loff_t *pos)
@@ -320,16 +320,16 @@ static void *lu_ref_seq_next(struct seq_file *seq, void *p, loff_t *pos)
         LASSERT(seq->private == p);
         LASSERT(!cfs_list_empty(&ref->lf_linkage));
 
-        cfs_spin_lock(&lu_ref_refs_guard);
-        next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
-        if (&next->lf_linkage == &lu_ref_refs) {
-                p = NULL;
-        } else {
-                (*pos)++;
-                cfs_list_move(&ref->lf_linkage, &next->lf_linkage);
-        }
-        cfs_spin_unlock(&lu_ref_refs_guard);
-        return p;
+       spin_lock(&lu_ref_refs_guard);
+       next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
+       if (&next->lf_linkage == &lu_ref_refs) {
+               p = NULL;
+       } else {
+               (*pos)++;
+               cfs_list_move(&ref->lf_linkage, &next->lf_linkage);
+       }
+       spin_unlock(&lu_ref_refs_guard);
+       return p;
 }
 
 static void lu_ref_seq_stop(struct seq_file *seq, void *p)
@@ -340,19 +340,18 @@ static void lu_ref_seq_stop(struct seq_file *seq, void *p)
 
 static int lu_ref_seq_show(struct seq_file *seq, void *p)
 {
-        struct lu_ref *ref  = p;
-        struct lu_ref *next; 
-
-        cfs_spin_lock(&lu_ref_refs_guard);
-        next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
-        if ((&next->lf_linkage == &lu_ref_refs) || lu_ref_is_marker(next)) {
-                cfs_spin_unlock(&lu_ref_refs_guard);
-                return 0;
-        }
-
-        /* print the entry */
-
-        cfs_spin_lock(&next->lf_guard);
+       struct lu_ref *ref  = p;
+       struct lu_ref *next;
+
+       spin_lock(&lu_ref_refs_guard);
+       next = cfs_list_entry(ref->lf_linkage.next, struct lu_ref, lf_linkage);
+       if ((&next->lf_linkage == &lu_ref_refs) || lu_ref_is_marker(next)) {
+               spin_unlock(&lu_ref_refs_guard);
+               return 0;
+       }
+
+       /* print the entry */
+       spin_lock(&next->lf_guard);
         seq_printf(seq, "lu_ref: %p %d %d %s:%d\n",
                    next, next->lf_refs, next->lf_failed,
                    next->lf_func, next->lf_line);
@@ -366,10 +365,10 @@ static int lu_ref_seq_show(struct seq_file *seq, void *p)
                         seq_printf(seq, "  #%d link: %s %p\n",
                                    i++, link->ll_scope, link->ll_source);
         }
-        cfs_spin_unlock(&next->lf_guard);
-        cfs_spin_unlock(&lu_ref_refs_guard);
+       spin_unlock(&next->lf_guard);
+       spin_unlock(&lu_ref_refs_guard);
 
-        return 0;
+       return 0;
 }
 
 static struct seq_operations lu_ref_seq_ops = {
@@ -381,17 +380,17 @@ static struct seq_operations lu_ref_seq_ops = {
 
 static int lu_ref_seq_open(struct inode *inode, struct file *file)
 {
-        struct lu_ref *marker = &lu_ref_marker;
-        int result = 0;
-
-        result = seq_open(file, &lu_ref_seq_ops);
-        if (result == 0) {
-                cfs_spin_lock(&lu_ref_refs_guard);
-                if (!cfs_list_empty(&marker->lf_linkage))
-                        result = -EAGAIN;
-                else
-                        cfs_list_add(&marker->lf_linkage, &lu_ref_refs);
-                cfs_spin_unlock(&lu_ref_refs_guard);
+       struct lu_ref *marker = &lu_ref_marker;
+       int result = 0;
+
+       result = seq_open(file, &lu_ref_seq_ops);
+       if (result == 0) {
+               spin_lock(&lu_ref_refs_guard);
+               if (!cfs_list_empty(&marker->lf_linkage))
+                       result = -EAGAIN;
+               else
+                       cfs_list_add(&marker->lf_linkage, &lu_ref_refs);
+               spin_unlock(&lu_ref_refs_guard);
 
                 if (result == 0) {
                         struct seq_file *f = file->private_data;
@@ -406,13 +405,13 @@ static int lu_ref_seq_open(struct inode *inode, struct file *file)
 
 static int lu_ref_seq_release(struct inode *inode, struct file *file)
 {
-        struct lu_ref *ref = ((struct seq_file *)file->private_data)->private;
+       struct lu_ref *ref = ((struct seq_file *)file->private_data)->private;
 
-        cfs_spin_lock(&lu_ref_refs_guard);
-        cfs_list_del_init(&ref->lf_linkage);
-        cfs_spin_unlock(&lu_ref_refs_guard);
+       spin_lock(&lu_ref_refs_guard);
+       cfs_list_del_init(&ref->lf_linkage);
+       spin_unlock(&lu_ref_refs_guard);
 
-        return seq_release(inode, file);
+       return seq_release(inode, file);
 }
 
 static struct file_operations lu_ref_dump_fops = {
@@ -427,13 +426,12 @@ static struct file_operations lu_ref_dump_fops = {
 
 int lu_ref_global_init(void)
 {
-        int result;
-
-        CDEBUG(D_CONSOLE,
-               "lu_ref tracking is enabled. Performance isn't.\n");
+       int result;
 
+       CDEBUG(D_CONSOLE,
+              "lu_ref tracking is enabled. Performance isn't.\n");
 
-        cfs_spin_lock_init(&lu_ref_refs_guard);
+       spin_lock_init(&lu_ref_refs_guard);
         result = lu_kmem_init(lu_ref_caches);
 
 #if defined(__KERNEL__) && defined(LPROCFS)
index 6cf943b..7a7c19f 100644
 # define list_for_each_rcu       cfs_list_for_each
 # define list_for_each_safe_rcu  cfs_list_for_each_safe
 # define list_for_each_entry_rcu cfs_list_for_each_entry
-# define rcu_read_lock()         cfs_spin_lock(&bucket->lock)
-# define rcu_read_unlock()       cfs_spin_unlock(&bucket->lock)
+# define rcu_read_lock()         spin_lock(&bucket->lock)
+# define rcu_read_unlock()       spin_unlock(&bucket->lock)
 #endif /* !__KERNEL__ */
 
 static __u64 handle_base;
 #define HANDLE_INCR 7
-static cfs_spinlock_t handle_base_lock;
+static spinlock_t handle_base_lock;
 
 static struct handle_bucket {
-        cfs_spinlock_t  lock;
-        cfs_list_t      head;
+       spinlock_t      lock;
+       cfs_list_t      head;
 } *handle_hash;
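
In userspace builds the rcu_read_lock() mapping above degrades to the bucket spinlock, so the RCU-style walk is only valid where a variable named bucket is in scope. A hedged sketch that works under either definition (bucket_walk() is hypothetical):

static struct portals_handle *bucket_walk(struct handle_bucket *bucket,
                                          __u64 cookie)
{
        struct portals_handle *h;
        struct portals_handle *found = NULL;

        rcu_read_lock();        /* spin_lock(&bucket->lock) in userspace */
        list_for_each_entry_rcu(h, &bucket->head, h_link)
                if (h->h_cookie == cookie)
                        found = h;
        rcu_read_unlock();

        return found;
}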
 
 #ifdef __arch_um__
@@ -95,33 +95,33 @@ void class_handle_hash(struct portals_handle *h,
          * This is a fast but simplistic cookie-generation algorithm; it will
          * need a re-do at some point in the future for security.
          */
-        cfs_spin_lock(&handle_base_lock);
-        handle_base += HANDLE_INCR;
-
-        h->h_cookie = handle_base;
-        if (unlikely(handle_base == 0)) {
-                /*
-                 * Cookie of zero is "dangerous", because in many places it's
-                 * assumed that 0 means "unassigned" handle, not bound to any
-                 * object.
-                 */
-                CWARN("The universe has been exhausted: cookie wrap-around.\n");
-                handle_base += HANDLE_INCR;
-        }
-        cfs_spin_unlock(&handle_base_lock);
+       spin_lock(&handle_base_lock);
+       handle_base += HANDLE_INCR;
+
+       h->h_cookie = handle_base;
+       if (unlikely(handle_base == 0)) {
+               /*
+                * Cookie of zero is "dangerous", because in many places it's
+                * assumed that 0 means "unassigned" handle, not bound to any
+                * object.
+                */
+               CWARN("The universe has been exhausted: cookie wrap-around.\n");
+               handle_base += HANDLE_INCR;
+       }
+       spin_unlock(&handle_base_lock);
 
        h->h_ops = ops;
-        cfs_spin_lock_init(&h->h_lock);
+       spin_lock_init(&h->h_lock);
 
-        bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
-        cfs_spin_lock(&bucket->lock);
-        list_add_rcu(&h->h_link, &bucket->head);
-        h->h_in = 1;
-        cfs_spin_unlock(&bucket->lock);
+       bucket = &handle_hash[h->h_cookie & HANDLE_HASH_MASK];
+       spin_lock(&bucket->lock);
+       list_add_rcu(&h->h_link, &bucket->head);
+       h->h_in = 1;
+       spin_unlock(&bucket->lock);
 
-        CDEBUG(D_INFO, "added object %p with handle "LPX64" to hash\n",
-               h, h->h_cookie);
-        EXIT;
+       CDEBUG(D_INFO, "added object %p with handle "LPX64" to hash\n",
+              h, h->h_cookie);
+       EXIT;
 }
 EXPORT_SYMBOL(class_handle_hash);
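
A hedged sketch of the cookie scheme above (alloc_cookie() is hypothetical). HANDLE_INCR is odd, so handle_base walks the whole 64-bit space before repeating; unlike the code above, this sketch reads the cookie after the wrap check, so zero is never handed out:

static __u64 alloc_cookie(void)
{
        __u64 cookie;

        spin_lock(&handle_base_lock);
        handle_base += HANDLE_INCR;
        if (unlikely(handle_base == 0))         /* 0 means "unassigned" */
                handle_base += HANDLE_INCR;
        cookie = handle_base;
        spin_unlock(&handle_base_lock);

        return cookie;
}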
 
@@ -136,40 +136,40 @@ static void class_handle_unhash_nolock(struct portals_handle *h)
         CDEBUG(D_INFO, "removing object %p with handle "LPX64" from hash\n",
                h, h->h_cookie);
 
-        cfs_spin_lock(&h->h_lock);
-        if (h->h_in == 0) {
-                cfs_spin_unlock(&h->h_lock);
-                return;
-        }
-        h->h_in = 0;
-        cfs_spin_unlock(&h->h_lock);
-        list_del_rcu(&h->h_link);
+       spin_lock(&h->h_lock);
+       if (h->h_in == 0) {
+               spin_unlock(&h->h_lock);
+               return;
+       }
+       h->h_in = 0;
+       spin_unlock(&h->h_lock);
+       list_del_rcu(&h->h_link);
 }
 
 void class_handle_unhash(struct portals_handle *h)
 {
-        struct handle_bucket *bucket;
-        bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
+       struct handle_bucket *bucket;
+       bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
 
-        cfs_spin_lock(&bucket->lock);
-        class_handle_unhash_nolock(h);
-        cfs_spin_unlock(&bucket->lock);
+       spin_lock(&bucket->lock);
+       class_handle_unhash_nolock(h);
+       spin_unlock(&bucket->lock);
 }
 EXPORT_SYMBOL(class_handle_unhash);
 
 void class_handle_hash_back(struct portals_handle *h)
 {
-        struct handle_bucket *bucket;
-        ENTRY;
+       struct handle_bucket *bucket;
+       ENTRY;
 
-        bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
+       bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
 
-        cfs_spin_lock(&bucket->lock);
-        list_add_rcu(&h->h_link, &bucket->head);
-        h->h_in = 1;
-        cfs_spin_unlock(&bucket->lock);
+       spin_lock(&bucket->lock);
+       list_add_rcu(&h->h_link, &bucket->head);
+       h->h_in = 1;
+       spin_unlock(&bucket->lock);
 
-        EXIT;
+       EXIT;
 }
 EXPORT_SYMBOL(class_handle_hash_back);
 
@@ -191,17 +191,17 @@ void *class_handle2object(__u64 cookie)
                 if (h->h_cookie != cookie)
                         continue;
 
-                cfs_spin_lock(&h->h_lock);
-                if (likely(h->h_in != 0)) {
+               spin_lock(&h->h_lock);
+               if (likely(h->h_in != 0)) {
                        h->h_ops->hop_addref(h);
-                        retval = h;
-                }
-                cfs_spin_unlock(&h->h_lock);
-                break;
-        }
-        rcu_read_unlock();
-
-        RETURN(retval);
+                       retval = h;
+               }
+               spin_unlock(&h->h_lock);
+               break;
+       }
+       rcu_read_unlock();
+
+       RETURN(retval);
 }
 EXPORT_SYMBOL(class_handle2object);
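
The lookup above relies on a small handshake: h_lock serializes the h_in flag, so a concurrent unhash and lookup cannot both win. A hedged sketch of that step in isolation (handle_try_get() is hypothetical):

static int handle_try_get(struct portals_handle *h)
{
        int got = 0;

        spin_lock(&h->h_lock);
        if (likely(h->h_in != 0)) {     /* still hashed */
                h->h_ops->hop_addref(h);
                got = 1;
        }
        spin_unlock(&h->h_lock);

        return got;
}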
 
@@ -229,12 +229,12 @@ int class_handle_init(void)
         if (handle_hash == NULL)
                 return -ENOMEM;
 
-        cfs_spin_lock_init(&handle_base_lock);
-        for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
-             bucket--) {
-                CFS_INIT_LIST_HEAD(&bucket->head);
-                cfs_spin_lock_init(&bucket->lock);
-        }
+       spin_lock_init(&handle_base_lock);
+       for (bucket = handle_hash + HANDLE_HASH_SIZE - 1; bucket >= handle_hash;
+            bucket--) {
+               CFS_INIT_LIST_HEAD(&bucket->head);
+               spin_lock_init(&bucket->lock);
+       }
 
         /** bug 21430: add randomness to the initial base */
         cfs_get_random_bytes(seed, sizeof(seed));
@@ -249,24 +249,24 @@ int class_handle_init(void)
 
 static int cleanup_all_handles(void)
 {
-        int rc;
-        int i;
+       int rc;
+       int i;
 
-        for (rc = i = 0; i < HANDLE_HASH_SIZE; i++) {
-                struct portals_handle *h;
+       for (rc = i = 0; i < HANDLE_HASH_SIZE; i++) {
+               struct portals_handle *h;
 
-                cfs_spin_lock(&handle_hash[i].lock);
-                list_for_each_entry_rcu(h, &(handle_hash[i].head), h_link) {
+               spin_lock(&handle_hash[i].lock);
+               list_for_each_entry_rcu(h, &(handle_hash[i].head), h_link) {
                        CERROR("force clean handle "LPX64" addr %p ops %p\n",
                               h->h_cookie, h, h->h_ops);
 
-                        class_handle_unhash_nolock(h);
-                        rc++;
-                }
-                cfs_spin_unlock(&handle_hash[i].lock);
-        }
+                       class_handle_unhash_nolock(h);
+                       rc++;
+               }
+               spin_unlock(&handle_hash[i].lock);
+       }
 
-        return rc;
+       return rc;
 }
 
 void class_handle_cleanup(void)
index 0d53c7e..d80d81d 100644
@@ -57,13 +57,13 @@ struct uuid_nid_data {
 };
 
 /* FIXME: This should probably become more elegant than a global linked list */
-static cfs_list_t           g_uuid_list;
-static cfs_spinlock_t       g_uuid_lock;
+static cfs_list_t      g_uuid_list;
+static spinlock_t      g_uuid_lock;
 
 void class_init_uuidlist(void)
 {
-        CFS_INIT_LIST_HEAD(&g_uuid_list);
-        cfs_spin_lock_init(&g_uuid_lock);
+       CFS_INIT_LIST_HEAD(&g_uuid_list);
+       spin_lock_init(&g_uuid_lock);
 }
 
 void class_exit_uuidlist(void)
@@ -74,24 +74,24 @@ void class_exit_uuidlist(void)
 
 int lustre_uuid_to_peer(const char *uuid, lnet_nid_t *peer_nid, int index)
 {
-        struct uuid_nid_data *data;
-        struct obd_uuid tmp;
-        int rc = -ENOENT;
-
-        obd_str2uuid(&tmp, uuid);
-        cfs_spin_lock(&g_uuid_lock);
-        cfs_list_for_each_entry(data, &g_uuid_list, un_list) {
-                if (obd_uuid_equals(&data->un_uuid, &tmp)) {
-                        if (index >= data->un_nid_count)
-                                break;
-
-                        rc = 0;
-                        *peer_nid = data->un_nids[index];
-                        break;
-                }
-        }
-        cfs_spin_unlock(&g_uuid_lock);
-        return rc;
+       struct uuid_nid_data *data;
+       struct obd_uuid tmp;
+       int rc = -ENOENT;
+
+       obd_str2uuid(&tmp, uuid);
+       spin_lock(&g_uuid_lock);
+       cfs_list_for_each_entry(data, &g_uuid_list, un_list) {
+               if (obd_uuid_equals(&data->un_uuid, &tmp)) {
+                       if (index >= data->un_nid_count)
+                               break;
+
+                       rc = 0;
+                       *peer_nid = data->un_nids[index];
+                       break;
+               }
+       }
+       spin_unlock(&g_uuid_lock);
+       return rc;
 }
 EXPORT_SYMBOL(lustre_uuid_to_peer);
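
A hedged usage sketch for the helper above: fetch the index-th NID registered for a uuid. The uuid string and wrapper are illustrative:

static int demo_first_nid(lnet_nid_t *nid)
{
        /* returns -ENOENT if the uuid or index is not registered */
        return lustre_uuid_to_peer("demo_UUID", nid, 0);
}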
 
@@ -115,7 +115,7 @@ int class_add_uuid(const char *uuid, __u64 nid)
         data->un_nids[0] = nid;
         data->un_nid_count = 1;
 
-        cfs_spin_lock(&g_uuid_lock);
+       spin_lock(&g_uuid_lock);
         cfs_list_for_each_entry(entry, &g_uuid_list, un_list) {
                 if (obd_uuid_equals(&entry->un_uuid, &data->un_uuid)) {
                         int i;
@@ -134,7 +134,7 @@ int class_add_uuid(const char *uuid, __u64 nid)
         }
         if (!found)
                 cfs_list_add(&data->un_list, &g_uuid_list);
-        cfs_spin_unlock(&g_uuid_lock);
+       spin_unlock(&g_uuid_lock);
 
         if (found) {
                 CDEBUG(D_INFO, "found uuid %s %s cnt=%d\n", uuid,
@@ -150,10 +150,10 @@ EXPORT_SYMBOL(class_add_uuid);
 /* Delete the nids for one uuid if specified, otherwise delete all */
 int class_del_uuid(const char *uuid)
 {
-        CFS_LIST_HEAD(deathrow);
-        struct uuid_nid_data *data;
+       CFS_LIST_HEAD(deathrow);
+       struct uuid_nid_data *data;
 
-        cfs_spin_lock(&g_uuid_lock);
+       spin_lock(&g_uuid_lock);
         if (uuid != NULL) {
                 struct obd_uuid tmp;
 
@@ -166,7 +166,7 @@ int class_del_uuid(const char *uuid)
                 }
         } else
                 cfs_list_splice_init(&g_uuid_list, &deathrow);
-        cfs_spin_unlock(&g_uuid_lock);
+       spin_unlock(&g_uuid_lock);
 
         if (uuid != NULL && cfs_list_empty(&deathrow)) {
                 CDEBUG(D_INFO, "Try to delete a non-existent uuid %s\n", uuid);
@@ -199,7 +199,7 @@ int class_check_uuid(struct obd_uuid *uuid, __u64 nid)
         CDEBUG(D_INFO, "check if uuid %s has %s.\n",
                obd_uuid2str(uuid), libcfs_nid2str(nid));
 
-        cfs_spin_lock(&g_uuid_lock);
+       spin_lock(&g_uuid_lock);
         cfs_list_for_each_entry(entry, &g_uuid_list, un_list) {
                 int i;
 
@@ -215,7 +215,7 @@ int class_check_uuid(struct obd_uuid *uuid, __u64 nid)
                 }
                 break;
         }
-        cfs_spin_unlock (&g_uuid_lock);
-        RETURN(found);
+       spin_unlock(&g_uuid_lock);
+       RETURN(found);
 }
 EXPORT_SYMBOL(class_check_uuid);
index 2b85c6d..dc73c63 100644
@@ -54,7 +54,7 @@
 static cfs_list_t llo_lobj_list;
 
 /** Lock to protect list manipulations */
-static cfs_mutex_t     llo_lock;
+static struct mutex    llo_lock;
 
 /**
  * Structure used to maintain state of path parsing.
@@ -371,18 +371,18 @@ EXPORT_SYMBOL(llo_store_create);
 
 void llo_local_obj_register(struct lu_local_obj_desc *llod)
 {
-        cfs_mutex_lock(&llo_lock);
+       mutex_lock(&llo_lock);
         cfs_list_add_tail(&llod->llod_linkage, &llo_lobj_list);
-        cfs_mutex_unlock(&llo_lock);
+       mutex_unlock(&llo_lock);
 }
 
 EXPORT_SYMBOL(llo_local_obj_register);
 
 void llo_local_obj_unregister(struct lu_local_obj_desc *llod)
 {
-        cfs_mutex_lock(&llo_lock);
+       mutex_lock(&llo_lock);
         cfs_list_del(&llod->llod_linkage);
-        cfs_mutex_unlock(&llo_lock);
+       mutex_unlock(&llo_lock);
 }
 
 EXPORT_SYMBOL(llo_local_obj_unregister);
@@ -403,7 +403,7 @@ int llo_local_objects_setup(const struct lu_env *env,
         int rc = 0;
 
         fid = &info->lti_cfid;
-        cfs_mutex_lock(&llo_lock);
+       mutex_lock(&llo_lock);
 
         cfs_list_for_each_entry(scan, &llo_lobj_list, llod_linkage) {
                 lu_local_obj_fid(fid, scan->llod_oid);
@@ -432,7 +432,7 @@ int llo_local_objects_setup(const struct lu_env *env,
         }
 
 out:
-        cfs_mutex_unlock(&llo_lock);
+       mutex_unlock(&llo_lock);
         return rc;
 }
 
@@ -443,7 +443,7 @@ int llo_global_init(void)
         int result;
 
         CFS_INIT_LIST_HEAD(&llo_lobj_list);
-        cfs_mutex_init(&llo_lock);
+       mutex_init(&llo_lock);
 
         LU_CONTEXT_KEY_INIT(&llod_key);
         result = lu_context_key_register(&llod_key);
index 02c4669..129e05d 100644
@@ -382,28 +382,28 @@ int class_attach(struct lustre_cfg *lcfg)
         LASSERTF(strncmp(obd->obd_name, name, strlen(name)) == 0,
                  "%p obd_name %s != %s\n", obd, obd->obd_name, name);
 
-        cfs_rwlock_init(&obd->obd_pool_lock);
-        obd->obd_pool_limit = 0;
-        obd->obd_pool_slv = 0;
-
-        CFS_INIT_LIST_HEAD(&obd->obd_exports);
-        CFS_INIT_LIST_HEAD(&obd->obd_unlinked_exports);
-        CFS_INIT_LIST_HEAD(&obd->obd_delayed_exports);
-        CFS_INIT_LIST_HEAD(&obd->obd_exports_timed);
-        CFS_INIT_LIST_HEAD(&obd->obd_nid_stats);
-        cfs_spin_lock_init(&obd->obd_nid_lock);
-        cfs_spin_lock_init(&obd->obd_dev_lock);
-        cfs_mutex_init(&obd->obd_dev_mutex);
-        cfs_spin_lock_init(&obd->obd_osfs_lock);
-        /* obd->obd_osfs_age must be set to a value in the distant
-         * past to guarantee a fresh statfs is fetched on mount. */
-        obd->obd_osfs_age = cfs_time_shift_64(-1000);
-
-        /* XXX belongs in setup not attach  */
-        cfs_init_rwsem(&obd->obd_observer_link_sem);
-        /* recovery data */
-        cfs_init_timer(&obd->obd_recovery_timer);
-        cfs_spin_lock_init(&obd->obd_recovery_task_lock);
+       rwlock_init(&obd->obd_pool_lock);
+       obd->obd_pool_limit = 0;
+       obd->obd_pool_slv = 0;
+
+       CFS_INIT_LIST_HEAD(&obd->obd_exports);
+       CFS_INIT_LIST_HEAD(&obd->obd_unlinked_exports);
+       CFS_INIT_LIST_HEAD(&obd->obd_delayed_exports);
+       CFS_INIT_LIST_HEAD(&obd->obd_exports_timed);
+       CFS_INIT_LIST_HEAD(&obd->obd_nid_stats);
+       spin_lock_init(&obd->obd_nid_lock);
+       spin_lock_init(&obd->obd_dev_lock);
+       mutex_init(&obd->obd_dev_mutex);
+       spin_lock_init(&obd->obd_osfs_lock);
+       /* obd->obd_osfs_age must be set to a value in the distant
+        * past to guarantee a fresh statfs is fetched on mount. */
+       obd->obd_osfs_age = cfs_time_shift_64(-1000);
+
+       /* XXX belongs in setup not attach  */
+       init_rwsem(&obd->obd_observer_link_sem);
+       /* recovery data */
+       cfs_init_timer(&obd->obd_recovery_timer);
+       spin_lock_init(&obd->obd_recovery_task_lock);
         cfs_waitq_init(&obd->obd_next_transno_waitq);
         cfs_waitq_init(&obd->obd_evict_inprogress_waitq);
         CFS_INIT_LIST_HEAD(&obd->obd_req_replay_queue);
@@ -431,9 +431,9 @@ int class_attach(struct lustre_cfg *lcfg)
         }
 
         /* Detach drops this */
-        cfs_spin_lock(&obd->obd_dev_lock);
-        cfs_atomic_set(&obd->obd_refcount, 1);
-        cfs_spin_unlock(&obd->obd_dev_lock);
+       spin_lock(&obd->obd_dev_lock);
+       cfs_atomic_set(&obd->obd_refcount, 1);
+       spin_unlock(&obd->obd_dev_lock);
         lu_ref_init(&obd->obd_reference);
         lu_ref_add(&obd->obd_reference, "attach", obd);
 
@@ -479,9 +479,9 @@ int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
         }
 
         /* is someone else setting us up right now? (attach inits spinlock) */
-        cfs_spin_lock(&obd->obd_dev_lock);
-        if (obd->obd_starting) {
-                cfs_spin_unlock(&obd->obd_dev_lock);
+       spin_lock(&obd->obd_dev_lock);
+       if (obd->obd_starting) {
+               spin_unlock(&obd->obd_dev_lock);
                 CERROR("Device %d setup in progress (type %s)\n",
                        obd->obd_minor, obd->obd_type->typ_name);
                 RETURN(-EEXIST);
@@ -492,7 +492,7 @@ int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
         obd->obd_uuid_hash = NULL;
         obd->obd_nid_hash = NULL;
         obd->obd_nid_stats_hash = NULL;
-        cfs_spin_unlock(&obd->obd_dev_lock);
+       spin_unlock(&obd->obd_dev_lock);
 
         /* create an uuid-export lustre hash */
         obd->obd_uuid_hash = cfs_hash_create("UUID_HASH",
@@ -541,10 +541,10 @@ int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
 
         obd->obd_set_up = 1;
 
-        cfs_spin_lock(&obd->obd_dev_lock);
-        /* cleanup drops this */
-        class_incref(obd, "setup", obd);
-        cfs_spin_unlock(&obd->obd_dev_lock);
+       spin_lock(&obd->obd_dev_lock);
+       /* cleanup drops this */
+       class_incref(obd, "setup", obd);
+       spin_unlock(&obd->obd_dev_lock);
 
         CDEBUG(D_IOCTL, "finished setup of obd %s (uuid %s)\n",
                obd->obd_name, obd->obd_uuid.uuid);
@@ -586,14 +586,14 @@ int class_detach(struct obd_device *obd, struct lustre_cfg *lcfg)
                 RETURN(-EBUSY);
         }
 
-        cfs_spin_lock(&obd->obd_dev_lock);
-        if (!obd->obd_attached) {
-                cfs_spin_unlock(&obd->obd_dev_lock);
-                CERROR("OBD device %d not attached\n", obd->obd_minor);
-                RETURN(-ENODEV);
-        }
-        obd->obd_attached = 0;
-        cfs_spin_unlock(&obd->obd_dev_lock);
+       spin_lock(&obd->obd_dev_lock);
+       if (!obd->obd_attached) {
+               spin_unlock(&obd->obd_dev_lock);
+               CERROR("OBD device %d not attached\n", obd->obd_minor);
+               RETURN(-ENODEV);
+       }
+       obd->obd_attached = 0;
+       spin_unlock(&obd->obd_dev_lock);
 
         CDEBUG(D_IOCTL, "detach on obd %s (uuid %s)\n",
                obd->obd_name, obd->obd_uuid.uuid);
@@ -620,24 +620,24 @@ int class_cleanup(struct obd_device *obd, struct lustre_cfg *lcfg)
                 RETURN(-ENODEV);
         }
 
-        cfs_spin_lock(&obd->obd_dev_lock);
-        if (obd->obd_stopping) {
-                cfs_spin_unlock(&obd->obd_dev_lock);
-                CERROR("OBD %d already stopping\n", obd->obd_minor);
-                RETURN(-ENODEV);
-        }
-        /* Leave this on forever */
-        obd->obd_stopping = 1;
+       spin_lock(&obd->obd_dev_lock);
+       if (obd->obd_stopping) {
+               spin_unlock(&obd->obd_dev_lock);
+               CERROR("OBD %d already stopping\n", obd->obd_minor);
+               RETURN(-ENODEV);
+       }
+       /* Leave this on forever */
+       obd->obd_stopping = 1;
 
        /* wait for already-arrived connections to finish. */
        while (obd->obd_conn_inprogress > 0) {
-               cfs_spin_unlock(&obd->obd_dev_lock);
+               spin_unlock(&obd->obd_dev_lock);
 
                cfs_cond_resched();
 
-               cfs_spin_lock(&obd->obd_dev_lock);
+               spin_lock(&obd->obd_dev_lock);
        }
-       cfs_spin_unlock(&obd->obd_dev_lock);
+       spin_unlock(&obd->obd_dev_lock);
 
         if (lcfg->lcfg_bufcount >= 2 && LUSTRE_CFG_BUFLEN(lcfg, 1) > 0) {
                 for (flag = lustre_cfg_string(lcfg, 1); *flag != 0; flag++)
@@ -721,24 +721,24 @@ EXPORT_SYMBOL(class_incref);
 
 void class_decref(struct obd_device *obd, const char *scope, const void *source)
 {
-        int err;
-        int refs;
+       int err;
+       int refs;
 
-        cfs_spin_lock(&obd->obd_dev_lock);
-        cfs_atomic_dec(&obd->obd_refcount);
-        refs = cfs_atomic_read(&obd->obd_refcount);
-        cfs_spin_unlock(&obd->obd_dev_lock);
-        lu_ref_del(&obd->obd_reference, scope, source);
+       spin_lock(&obd->obd_dev_lock);
+       cfs_atomic_dec(&obd->obd_refcount);
+       refs = cfs_atomic_read(&obd->obd_refcount);
+       spin_unlock(&obd->obd_dev_lock);
+       lu_ref_del(&obd->obd_reference, scope, source);
 
-        CDEBUG(D_INFO, "Decref %s (%p) now %d\n", obd->obd_name, obd, refs);
+       CDEBUG(D_INFO, "Decref %s (%p) now %d\n", obd->obd_name, obd, refs);
 
-        if ((refs == 1) && obd->obd_stopping) {
-                /* All exports have been destroyed; there should
-                   be no more in-progress ops by this point.*/
+       if ((refs == 1) && obd->obd_stopping) {
+               /* All exports have been destroyed; there should
+                  be no more in-progress ops by this point.*/
 
-                cfs_spin_lock(&obd->obd_self_export->exp_lock);
-                obd->obd_self_export->exp_flags |= exp_flags_from_obd(obd);
-                cfs_spin_unlock(&obd->obd_self_export->exp_lock);
+               spin_lock(&obd->obd_self_export->exp_lock);
+               obd->obd_self_export->exp_flags |= exp_flags_from_obd(obd);
+               spin_unlock(&obd->obd_self_export->exp_lock);
 
                 /* note that we'll recurse into class_decref again */
                 class_unlink_export(obd->obd_self_export);
index ea465c8..be886ad 100644
@@ -65,7 +65,7 @@ static void (*kill_super_cb)(struct super_block *sb) = NULL;
 
 /*********** mount lookup *********/
 
-CFS_DEFINE_MUTEX(lustre_mount_info_lock);
+DEFINE_MUTEX(lustre_mount_info_lock);
 static CFS_LIST_HEAD(server_mount_info_list);
 
 static struct lustre_mount_info *server_find_mount(const char *name)
@@ -105,10 +105,10 @@ static int server_register_mount(const char *name, struct super_block *sb,
         }
         strcpy(name_cp, name);
 
-        cfs_mutex_lock(&lustre_mount_info_lock);
+       mutex_lock(&lustre_mount_info_lock);
 
         if (server_find_mount(name)) {
-                cfs_mutex_unlock(&lustre_mount_info_lock);
+               mutex_unlock(&lustre_mount_info_lock);
                 OBD_FREE(lmi, sizeof(*lmi));
                 OBD_FREE(name_cp, strlen(name) + 1);
                 CERROR("Already registered %s\n", name);
@@ -119,7 +119,7 @@ static int server_register_mount(const char *name, struct super_block *sb,
         lmi->lmi_mnt = mnt;
         cfs_list_add(&lmi->lmi_list_chain, &server_mount_info_list);
 
-        cfs_mutex_unlock(&lustre_mount_info_lock);
+       mutex_unlock(&lustre_mount_info_lock);
 
        CDEBUG(D_MOUNT, "reg_mnt %p from %s\n", lmi->lmi_mnt, name);
 
@@ -132,10 +132,10 @@ static int server_deregister_mount(const char *name)
         struct lustre_mount_info *lmi;
         ENTRY;
 
-        cfs_mutex_lock(&lustre_mount_info_lock);
+       mutex_lock(&lustre_mount_info_lock);
         lmi = server_find_mount(name);
         if (!lmi) {
-                cfs_mutex_unlock(&lustre_mount_info_lock);
+               mutex_unlock(&lustre_mount_info_lock);
                 CERROR("%s not registered\n", name);
                 RETURN(-ENOENT);
         }
@@ -145,7 +145,7 @@ static int server_deregister_mount(const char *name)
         OBD_FREE(lmi->lmi_name, strlen(lmi->lmi_name) + 1);
         cfs_list_del(&lmi->lmi_list_chain);
         OBD_FREE(lmi, sizeof(*lmi));
-        cfs_mutex_unlock(&lustre_mount_info_lock);
+       mutex_unlock(&lustre_mount_info_lock);
 
         RETURN(0);
 }
@@ -159,9 +159,9 @@ struct lustre_mount_info *server_get_mount(const char *name)
         struct lustre_sb_info *lsi;
         ENTRY;
 
-        cfs_mutex_lock(&lustre_mount_info_lock);
+       mutex_lock(&lustre_mount_info_lock);
         lmi = server_find_mount(name);
-        cfs_mutex_unlock(&lustre_mount_info_lock);
+       mutex_unlock(&lustre_mount_info_lock);
         if (!lmi) {
                 CERROR("Can't find mount for %s\n", name);
                 RETURN(NULL);
@@ -187,9 +187,9 @@ struct lustre_mount_info *server_get_mount_2(const char *name)
         struct lustre_mount_info *lmi;
         ENTRY;
 
-        cfs_mutex_lock(&lustre_mount_info_lock);
+       mutex_lock(&lustre_mount_info_lock);
         lmi = server_find_mount(name);
-        cfs_mutex_unlock(&lustre_mount_info_lock);
+       mutex_unlock(&lustre_mount_info_lock);
         if (!lmi)
                 CERROR("Can't find mount for %s\n", name);
 
@@ -206,9 +206,9 @@ int server_put_mount(const char *name, struct vfsmount *mnt)
         struct lustre_sb_info *lsi;
         ENTRY;
 
-        cfs_mutex_lock(&lustre_mount_info_lock);
+       mutex_lock(&lustre_mount_info_lock);
         lmi = server_find_mount(name);
-        cfs_mutex_unlock(&lustre_mount_info_lock);
+       mutex_unlock(&lustre_mount_info_lock);
         if (!lmi) {
                 CERROR("Can't find mount for %s\n", name);
                 RETURN(-ENOENT);
@@ -441,7 +441,7 @@ static int server_stop_mgs(struct super_block *sb)
         RETURN(rc);
 }
 
-CFS_DEFINE_MUTEX(mgc_start_lock);
+DEFINE_MUTEX(mgc_start_lock);
 
 /** Set up a mgc obd to process startup logs
  *
@@ -494,7 +494,7 @@ static int lustre_start_mgc(struct super_block *sb)
                 RETURN(-EINVAL);
         }
 
-        cfs_mutex_lock(&mgc_start_lock);
+       mutex_lock(&mgc_start_lock);
 
         len = strlen(LUSTRE_MGC_OBDNAME) + strlen(libcfs_nid2str(nid)) + 1;
         OBD_ALLOC(mgcname, len);
@@ -706,7 +706,7 @@ out:
            to the same mgc.*/
         lsi->lsi_mgc = obd;
 out_free:
-        cfs_mutex_unlock(&mgc_start_lock);
+       mutex_unlock(&mgc_start_lock);
 
         if (data)
                 OBD_FREE_PTR(data);
@@ -732,7 +732,7 @@ static int lustre_stop_mgc(struct super_block *sb)
                 RETURN(-ENOENT);
         lsi->lsi_mgc = NULL;
 
-        cfs_mutex_lock(&mgc_start_lock);
+       mutex_lock(&mgc_start_lock);
         LASSERT(cfs_atomic_read(&obd->u.cli.cl_mgc_refcount) > 0);
         if (!cfs_atomic_dec_and_test(&obd->u.cli.cl_mgc_refcount)) {
                 /* This is not fatal, every client that stops
@@ -784,7 +784,7 @@ out:
                 OBD_FREE(niduuid, len);
 
         /* class_import_put will get rid of the additional connections */
-        cfs_mutex_unlock(&mgc_start_lock);
+       mutex_unlock(&mgc_start_lock);
         RETURN(rc);
 }
 
@@ -942,7 +942,7 @@ cleanup:
 EXPORT_SYMBOL(tgt_name2ospname);
 
 static CFS_LIST_HEAD(osp_register_list);
-CFS_DEFINE_MUTEX(osp_register_list_lock);
+DEFINE_MUTEX(osp_register_list_lock);
 
 int lustre_register_osp_item(char *ospname, struct obd_export **exp,
                             register_osp_cb cb_func, void *cb_data)
@@ -959,7 +959,7 @@ int lustre_register_osp_item(char *ospname, struct obd_export **exp,
        if (ori == NULL)
                RETURN(-ENOMEM);
 
-       cfs_mutex_lock(&osp_register_list_lock);
+       mutex_lock(&osp_register_list_lock);
 
        osp = class_name2obd(ospname);
        if (osp != NULL && osp->obd_set_up == 1) {
@@ -967,7 +967,7 @@ int lustre_register_osp_item(char *ospname, struct obd_export **exp,
 
                OBD_ALLOC_PTR(uuid);
                if (uuid == NULL) {
-                       cfs_mutex_unlock(&osp_register_list_lock);
+                       mutex_unlock(&osp_register_list_lock);
                        RETURN(-ENOMEM);
                }
                memcpy(uuid->uuid, ospname, strlen(ospname));
@@ -985,7 +985,7 @@ int lustre_register_osp_item(char *ospname, struct obd_export **exp,
        if (*exp != NULL && cb_func != NULL)
                cb_func(cb_data);
 
-       cfs_mutex_unlock(&osp_register_list_lock);
+       mutex_unlock(&osp_register_list_lock);
        RETURN(0);
 }
 EXPORT_SYMBOL(lustre_register_osp_item);
@@ -994,7 +994,7 @@ void lustre_deregister_osp_item(struct obd_export **exp)
 {
        struct osp_register_item *ori, *tmp;
 
-       cfs_mutex_lock(&osp_register_list_lock);
+       mutex_lock(&osp_register_list_lock);
        cfs_list_for_each_entry_safe(ori, tmp, &osp_register_list, ori_list) {
                if (exp == ori->ori_exp) {
                        if (*exp)
@@ -1004,7 +1004,7 @@ void lustre_deregister_osp_item(struct obd_export **exp)
                        break;
                }
        }
-       cfs_mutex_unlock(&osp_register_list_lock);
+       mutex_unlock(&osp_register_list_lock);
 }
 EXPORT_SYMBOL(lustre_deregister_osp_item);
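
osp_register_list_lock follows the same conversion as every registration list in this patch: a file-scope kernel mutex guarding list add/del. A self-contained sketch of the pattern (all names illustrative, not from the patch):

#include <linux/list.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);         /* was CFS_DEFINE_MUTEX() */
static LIST_HEAD(demo_list);

struct demo_item {
        struct list_head di_link;
};

static void demo_register(struct demo_item *it)
{
        mutex_lock(&demo_lock);         /* was cfs_mutex_lock() */
        list_add_tail(&it->di_link, &demo_list);
        mutex_unlock(&demo_lock);       /* was cfs_mutex_unlock() */
}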
 
@@ -1013,7 +1013,7 @@ static void lustre_notify_osp_list(struct obd_export *exp)
        struct osp_register_item *ori, *tmp;
        LASSERT(exp != NULL);
 
-       cfs_mutex_lock(&osp_register_list_lock);
+       mutex_lock(&osp_register_list_lock);
        cfs_list_for_each_entry_safe(ori, tmp, &osp_register_list, ori_list) {
                if (strcmp(exp->exp_obd->obd_name, ori->ori_name))
                        continue;
@@ -1023,7 +1023,7 @@ static void lustre_notify_osp_list(struct obd_export *exp)
                if (ori->ori_cb_func != NULL)
                        ori->ori_cb_func(ori->ori_cb_data);
        }
-       cfs_mutex_unlock(&osp_register_list_lock);
+       mutex_unlock(&osp_register_list_lock);
 }
 
 static int lustre_osp_connect(struct obd_device *osp)
@@ -1428,7 +1428,7 @@ cleanup:
        RETURN(rc);
 }
 
-CFS_DEFINE_MUTEX(server_start_lock);
+DEFINE_MUTEX(server_start_lock);
 
 /* Stop MDS/OSS if nobody is using them */
 static int server_stop_servers(int lsiflags)
@@ -1438,7 +1438,7 @@ static int server_stop_servers(int lsiflags)
         int rc = 0;
         ENTRY;
 
-        cfs_mutex_lock(&server_start_lock);
+       mutex_lock(&server_start_lock);
 
         /* Either an MDT or an OST or neither  */
         /* if this was an MDT, and there are no more MDT's, clean up the MDS */
@@ -1462,7 +1462,7 @@ static int server_stop_servers(int lsiflags)
                         rc = err;
         }
 
-        cfs_mutex_unlock(&server_start_lock);
+       mutex_unlock(&server_start_lock);
 
         RETURN(rc);
 }
@@ -1786,7 +1786,7 @@ static int server_start_targets(struct super_block *sb, struct vfsmount *mnt)
         /* If we're an MDT, make sure the global MDS is running */
         if (lsi->lsi_ldd->ldd_flags & LDD_F_SV_TYPE_MDT) {
                 /* make sure the MDS is started */
-                cfs_mutex_lock(&server_start_lock);
+               mutex_lock(&server_start_lock);
                 obd = class_name2obd(LUSTRE_MDS_OBDNAME);
                 if (!obd) {
                         rc = lustre_start_simple(LUSTRE_MDS_OBDNAME,
@@ -1795,19 +1795,19 @@ static int server_start_targets(struct super_block *sb, struct vfsmount *mnt)
                                                  LUSTRE_MDS_OBDNAME"_uuid",
                                                  0, 0);
                         if (rc) {
-                                cfs_mutex_unlock(&server_start_lock);
+                               mutex_unlock(&server_start_lock);
                                 CERROR("failed to start MDS: %d\n", rc);
                                 RETURN(rc);
                         }
                 }
-                cfs_mutex_unlock(&server_start_lock);
+               mutex_unlock(&server_start_lock);
         }
 #endif
 
         /* If we're an OST, make sure the global OSS is running */
        if (IS_OST(lsi)) {
                 /* make sure OSS is started */
-                cfs_mutex_lock(&server_start_lock);
+               mutex_lock(&server_start_lock);
                 obd = class_name2obd(LUSTRE_OSS_OBDNAME);
                 if (!obd) {
                         rc = lustre_start_simple(LUSTRE_OSS_OBDNAME,
@@ -1815,12 +1815,12 @@ static int server_start_targets(struct super_block *sb, struct vfsmount *mnt)
                                                  LUSTRE_OSS_OBDNAME"_uuid",
                                                 0, 0, 0, 0);
                         if (rc) {
-                                cfs_mutex_unlock(&server_start_lock);
+                               mutex_unlock(&server_start_lock);
                                 CERROR("failed to start OSS: %d\n", rc);
                                 RETURN(rc);
                         }
                 }
-                cfs_mutex_unlock(&server_start_lock);
+               mutex_unlock(&server_start_lock);
         }
 
         /* Set the mgc fs to our server disk.  This allows the MGC to
@@ -2254,7 +2254,7 @@ static struct super_operations server_ops =
         .statfs         = server_statfs,
 };
 
-#define log2(n) cfs_ffz(~(n))
+#define log2(n) ffz(~(n))
 #define LUSTRE_SUPER_MAGIC 0x0BD00BD1
 
 static int server_fill_super_common(struct super_block *sb)
@@ -2938,7 +2938,7 @@ int lustre_fill_super(struct super_block *sb, void *data, int silent)
          * Disable lockdep during mount, because mount locking patterns are
          * `special'.
          */
-        cfs_lockdep_off();
+       lockdep_off();
 
         /*
          * LU-639: the obd cleanup of last mount may not finish yet, wait here.
@@ -2990,7 +2990,7 @@ out:
                 CDEBUG(D_SUPER, "Mount %s complete\n",
                        lmd->lmd_dev);
         }
-        cfs_lockdep_on();
+       lockdep_on();
         return rc;
 }
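
The obd_mount.c hunks above cover three of the converted primitives at once: CFS_DEFINE_MUTEX() becomes the kernel's DEFINE_MUTEX(), the cfs_mutex_lock()/cfs_mutex_unlock() pairs lose their prefix unchanged, and cfs_lockdep_off()/cfs_lockdep_on() become lockdep_off()/lockdep_on(), which disable and re-enable lock-dependency tracking for the current task across the mount path. A minimal sketch of the resulting mutex pattern, with hypothetical names rather than code from this patch:

#include <linux/mutex.h>

static DEFINE_MUTEX(demo_start_lock);	/* statically initialized */

static int demo_start_service(void)
{
	int rc = 0;

	mutex_lock(&demo_start_lock);	/* serialize service startup */
	/* ... start the service, set rc on failure ... */
	mutex_unlock(&demo_start_lock);
	return rc;
}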
 
index 5370248..044c138 100644 (file)
@@ -106,13 +106,13 @@ static int echo_destroy_export(struct obd_export *exp)
 
  static __u64 echo_next_id(struct obd_device *obddev)
 {
-        obd_id id;
+       obd_id id;
 
-        cfs_spin_lock(&obddev->u.echo.eo_lock);
-        id = ++obddev->u.echo.eo_lastino;
-        cfs_spin_unlock(&obddev->u.echo.eo_lock);
+       spin_lock(&obddev->u.echo.eo_lock);
+       id = ++obddev->u.echo.eo_lastino;
+       spin_unlock(&obddev->u.echo.eo_lock);
 
-        return id;
+       return id;
 }
 
 static int echo_create(const struct lu_env *env, struct obd_export *exp,
@@ -561,7 +561,7 @@ static int echo_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
         ENTRY;
 
         obd->u.echo.eo_obt.obt_magic = OBT_MAGIC;
-        cfs_spin_lock_init(&obd->u.echo.eo_lock);
+       spin_lock_init(&obd->u.echo.eo_lock);
         obd->u.echo.eo_lastino = ECHO_INIT_OID;
 
         sprintf(ns_name, "echotgt-%s", obd->obd_uuid.uuid);
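
echo_next_id() above is the bare spinlock conversion: spin_lock_init() once at setup time (here in echo_setup), then unprefixed spin_lock()/spin_unlock() around the shared counter. The same pattern, sketched with hypothetical names:

#include <linux/spinlock.h>
#include <linux/types.h>

static spinlock_t demo_lock;		/* protects demo_last_id */
static u64 demo_last_id;

static void demo_setup(void)
{
	spin_lock_init(&demo_lock);	/* must run before first use */
}

static u64 demo_next_id(void)
{
	u64 id;

	spin_lock(&demo_lock);
	id = ++demo_last_id;		/* atomic w.r.t. other callers */
	spin_unlock(&demo_lock);
	return id;
}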
index b33ad4b..492ad2d 100644 (file)
@@ -88,7 +88,7 @@ struct echo_object_conf {
 
 struct echo_page {
         struct cl_page_slice   ep_cl;
-        cfs_mutex_t            ep_lock;
+       struct mutex            ep_lock;
         cfs_page_t            *ep_vmpage;
 };
 
@@ -286,8 +286,8 @@ static int echo_page_own(const struct lu_env *env,
         struct echo_page *ep = cl2echo_page(slice);
 
         if (!nonblock)
-                cfs_mutex_lock(&ep->ep_lock);
-        else if (!cfs_mutex_trylock(&ep->ep_lock))
+               mutex_lock(&ep->ep_lock);
+       else if (!mutex_trylock(&ep->ep_lock))
                 return -EAGAIN;
         return 0;
 }
@@ -298,8 +298,8 @@ static void echo_page_disown(const struct lu_env *env,
 {
         struct echo_page *ep = cl2echo_page(slice);
 
-        LASSERT(cfs_mutex_is_locked(&ep->ep_lock));
-        cfs_mutex_unlock(&ep->ep_lock);
+       LASSERT(mutex_is_locked(&ep->ep_lock));
+       mutex_unlock(&ep->ep_lock);
 }
 
 static void echo_page_discard(const struct lu_env *env,
@@ -312,7 +312,7 @@ static void echo_page_discard(const struct lu_env *env,
 static int echo_page_is_vmlocked(const struct lu_env *env,
                                  const struct cl_page_slice *slice)
 {
-        if (cfs_mutex_is_locked(&cl2echo_page(slice)->ep_lock))
+       if (mutex_is_locked(&cl2echo_page(slice)->ep_lock))
                 return -EBUSY;
         return -ENODATA;
 }
@@ -352,7 +352,7 @@ static int echo_page_print(const struct lu_env *env,
         struct echo_page *ep = cl2echo_page(slice);
 
         (*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p %d vm@%p\n",
-                   ep, cfs_mutex_is_locked(&ep->ep_lock), ep->ep_vmpage);
+                  ep, mutex_is_locked(&ep->ep_lock), ep->ep_vmpage);
         return 0;
 }
 
@@ -434,7 +434,7 @@ static struct cl_page *echo_page_init(const struct lu_env *env,
                 struct echo_object *eco = cl2echo_obj(obj);
                 ep->ep_vmpage = vmpage;
                 page_cache_get(vmpage);
-                cfs_mutex_init(&ep->ep_lock);
+               mutex_init(&ep->ep_lock);
                 cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
                 cfs_atomic_inc(&eco->eo_npages);
         }
@@ -519,11 +519,11 @@ static int echo_object_init(const struct lu_env *env, struct lu_object *obj,
         eco->eo_dev = ed;
         cfs_atomic_set(&eco->eo_npages, 0);
 
-        cfs_spin_lock(&ec->ec_lock);
-        cfs_list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
-        cfs_spin_unlock(&ec->ec_lock);
+       spin_lock(&ec->ec_lock);
+       cfs_list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
+       spin_unlock(&ec->ec_lock);
 
-        RETURN(0);
+       RETURN(0);
 }
 
 /* taken from osc_unpackmd() */
@@ -584,9 +584,9 @@ static void echo_object_free(const struct lu_env *env, struct lu_object *obj)
 
         LASSERT(cfs_atomic_read(&eco->eo_npages) == 0);
 
-        cfs_spin_lock(&ec->ec_lock);
+       spin_lock(&ec->ec_lock);
         cfs_list_del_init(&eco->eo_obj_chain);
-        cfs_spin_unlock(&ec->ec_lock);
+       spin_unlock(&ec->ec_lock);
 
         lu_object_fini(obj);
         lu_object_header_fini(obj->lo_header);
@@ -880,14 +880,14 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
 
                 ls = next->ld_site;
 
-                cfs_spin_lock(&ls->ls_ld_lock);
-                cfs_list_for_each_entry(ld, &ls->ls_ld_linkage, ld_linkage) {
-                        if (strcmp(ld->ld_type->ldt_name, tgt_type_name) == 0) {
-                                found = 1;
-                                break;
-                        }
-                }
-                cfs_spin_unlock(&ls->ls_ld_lock);
+               spin_lock(&ls->ls_ld_lock);
+               cfs_list_for_each_entry(ld, &ls->ls_ld_linkage, ld_linkage) {
+                       if (strcmp(ld->ld_type->ldt_name, tgt_type_name) == 0) {
+                               found = 1;
+                               break;
+                       }
+               }
+               spin_unlock(&ls->ls_ld_lock);
 
                 if (found == 0) {
                         CERROR("%s is not lu device type!\n",
@@ -1015,29 +1015,29 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
          * all of the cached objects. Anyway, the echo device is probably
          * being accessed in parallel.
          */
-        cfs_spin_lock(&ec->ec_lock);
-        cfs_list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
-                eco->eo_deleted = 1;
-        cfs_spin_unlock(&ec->ec_lock);
-
-        /* purge again */
-        lu_site_purge(env, &ed->ed_site->cs_lu, -1);
-
-        CDEBUG(D_INFO,
-               "Waiting for the reference of echo object to be dropped\n");
-
-        /* Wait for the last reference to be dropped. */
-        cfs_spin_lock(&ec->ec_lock);
-        while (!cfs_list_empty(&ec->ec_objects)) {
-                cfs_spin_unlock(&ec->ec_lock);
-                CERROR("echo_client still has objects at cleanup time, "
-                       "wait for 1 second\n");
-                cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
-                                                   cfs_time_seconds(1));
-                lu_site_purge(env, &ed->ed_site->cs_lu, -1);
-                cfs_spin_lock(&ec->ec_lock);
-        }
-        cfs_spin_unlock(&ec->ec_lock);
+       spin_lock(&ec->ec_lock);
+       cfs_list_for_each_entry(eco, &ec->ec_objects, eo_obj_chain)
+               eco->eo_deleted = 1;
+       spin_unlock(&ec->ec_lock);
+
+       /* purge again */
+       lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+
+       CDEBUG(D_INFO,
+              "Waiting for the reference of echo object to be dropped\n");
+
+       /* Wait for the last reference to be dropped. */
+       spin_lock(&ec->ec_lock);
+       while (!cfs_list_empty(&ec->ec_objects)) {
+               spin_unlock(&ec->ec_lock);
+               CERROR("echo_client still has objects at cleanup time, "
+                      "wait for 1 second\n");
+               cfs_schedule_timeout_and_set_state(CFS_TASK_UNINT,
+                                                  cfs_time_seconds(1));
+               lu_site_purge(env, &ed->ed_site->cs_lu, -1);
+               spin_lock(&ec->ec_lock);
+       }
+       spin_unlock(&ec->ec_lock);
 
         LASSERT(cfs_list_empty(&ec->ec_locks));
 
@@ -1166,7 +1166,7 @@ static int cl_echo_object_put(struct echo_object *eco)
         if (eco->eo_deleted) {
                 struct lu_object_header *loh = obj->co_lu.lo_header;
                 LASSERT(&eco->eo_hdr == luh2coh(loh));
-                cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags);
+               set_bit(LU_OBJECT_HEARD_BANSHEE, &loh->loh_flags);
         }
 
         cl_object_put(env, obj);
@@ -1206,18 +1206,19 @@ static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
                 rc = cl_wait(env, lck);
                 if (rc == 0) {
                         el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
-                        cfs_spin_lock(&ec->ec_lock);
-                        if (cfs_list_empty(&el->el_chain)) {
-                                cfs_list_add(&el->el_chain, &ec->ec_locks);
-                                el->el_cookie = ++ec->ec_unique;
-                        }
-                        cfs_atomic_inc(&el->el_refcount);
-                        *cookie = el->el_cookie;
-                        cfs_spin_unlock(&ec->ec_lock);
-                } else
-                        cl_lock_release(env, lck, "ec enqueue", cfs_current());
-        }
-        RETURN(rc);
+                       spin_lock(&ec->ec_lock);
+                       if (cfs_list_empty(&el->el_chain)) {
+                               cfs_list_add(&el->el_chain, &ec->ec_locks);
+                               el->el_cookie = ++ec->ec_unique;
+                       }
+                       cfs_atomic_inc(&el->el_refcount);
+                       *cookie = el->el_cookie;
+                       spin_unlock(&ec->ec_lock);
+               } else {
+                       cl_lock_release(env, lck, "ec enqueue", cfs_current());
+               }
+       }
+       RETURN(rc);
 }
 
 static int cl_echo_enqueue(struct echo_object *eco, obd_off start, obd_off end,
@@ -1262,7 +1263,7 @@ static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed,
         ENTRY;
 
         LASSERT(ec != NULL);
-        cfs_spin_lock (&ec->ec_lock);
+       spin_lock(&ec->ec_lock);
         cfs_list_for_each (el, &ec->ec_locks) {
                 ecl = cfs_list_entry (el, struct echo_lock, el_chain);
                 CDEBUG(D_INFO, "ecl: %p, cookie: "LPX64"\n", ecl, ecl->el_cookie);
@@ -1275,7 +1276,7 @@ static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed,
                         break;
                 }
         }
-        cfs_spin_unlock (&ec->ec_lock);
+       spin_unlock(&ec->ec_lock);
 
         if (!found)
                 RETURN(-ENOENT);
@@ -2983,7 +2984,7 @@ static int echo_client_setup(const struct lu_env *env,
                 RETURN(-EINVAL);
         }
 
-        cfs_spin_lock_init (&ec->ec_lock);
+       spin_lock_init(&ec->ec_lock);
         CFS_INIT_LIST_HEAD (&ec->ec_objects);
         CFS_INIT_LIST_HEAD (&ec->ec_locks);
         ec->ec_unique = 0;
@@ -3011,9 +3012,9 @@ static int echo_client_setup(const struct lu_env *env,
         rc = obd_connect(env, &ec->ec_exp, tgt, &echo_uuid, ocd, NULL);
         if (rc == 0) {
                 /* Turn off pinger because it connects to tgt obd directly. */
-                cfs_spin_lock(&tgt->obd_dev_lock);
-                cfs_list_del_init(&ec->ec_exp->exp_obd_chain_timed);
-                cfs_spin_unlock(&tgt->obd_dev_lock);
+               spin_lock(&tgt->obd_dev_lock);
+               cfs_list_del_init(&ec->ec_exp->exp_obd_chain_timed);
+               spin_unlock(&tgt->obd_dev_lock);
         }
 
         OBD_FREE(ocd, sizeof(*ocd));
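
echo_page_own() above is where the nonblocking mutex variant matters: mutex_trylock() returns 1 on success and 0 under contention, letting the caller return -EAGAIN instead of sleeping, while mutex_is_locked() survives only in assertions such as echo_page_disown(). A sketch of the ownership pattern (hypothetical names):

#include <linux/mutex.h>
#include <linux/errno.h>

struct demo_page {
	struct mutex dp_lock;			/* replaces the old cfs_mutex_t */
};

static int demo_page_own(struct demo_page *dp, int nonblock)
{
	if (!nonblock)
		mutex_lock(&dp->dp_lock);	/* sleep until owned */
	else if (!mutex_trylock(&dp->dp_lock))	/* 0 means contended */
		return -EAGAIN;
	return 0;
}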
index 0bd7095..414e7f2 100644 (file)
@@ -134,9 +134,9 @@ static int lprocfs_ofd_wr_grant_ratio(struct file *file, const char *buffer,
                      "a huge part of the free space is now reserved for "
                      "grants\n", obd->obd_name);
 
-       cfs_spin_lock(&ofd->ofd_grant_lock);
+       spin_lock(&ofd->ofd_grant_lock);
        ofd->ofd_grant_ratio = ofd_grant_ratio_conv(val);
-       cfs_spin_unlock(&ofd->ofd_grant_lock);
+       spin_unlock(&ofd->ofd_grant_lock);
        return count;
 }
 
@@ -166,9 +166,9 @@ static int lprocfs_ofd_wr_precreate_batch(struct file *file, const char *buffer,
        if (val < 1)
                return -EINVAL;
 
-       cfs_spin_lock(&ofd->ofd_objid_lock);
+       spin_lock(&ofd->ofd_objid_lock);
        ofd->ofd_precreate_batch = val;
-       cfs_spin_unlock(&ofd->ofd_objid_lock);
+       spin_unlock(&ofd->ofd_objid_lock);
        return count;
 }
 
@@ -317,9 +317,9 @@ int lprocfs_ofd_wr_degraded(struct file *file, const char *buffer,
        if (rc)
                return rc;
 
-       cfs_spin_lock(&ofd->ofd_flags_lock);
+       spin_lock(&ofd->ofd_flags_lock);
        ofd->ofd_raid_degraded = !!val;
-       cfs_spin_unlock(&ofd->ofd_flags_lock);
+       spin_unlock(&ofd->ofd_flags_lock);
 
        return count;
 }
@@ -363,10 +363,10 @@ int lprocfs_ofd_wr_syncjournal(struct file *file, const char *buffer,
        if (val < 0)
                return -EINVAL;
 
-       cfs_spin_lock(&ofd->ofd_flags_lock);
+       spin_lock(&ofd->ofd_flags_lock);
        ofd->ofd_syncjournal = !!val;
        ofd_slc_set(ofd);
-       cfs_spin_unlock(&ofd->ofd_flags_lock);
+       spin_unlock(&ofd->ofd_flags_lock);
 
        return count;
 }
@@ -413,9 +413,9 @@ int lprocfs_ofd_wr_sync_lock_cancel(struct file *file, const char *buffer,
        if (val < 0 || val > 2)
                return -EINVAL;
 
-       cfs_spin_lock(&ofd->ofd_flags_lock);
+       spin_lock(&ofd->ofd_flags_lock);
        ofd->ofd_sync_lock_cancel = val;
-       cfs_spin_unlock(&ofd->ofd_flags_lock);
+       spin_unlock(&ofd->ofd_flags_lock);
        return count;
 }
 
@@ -445,9 +445,9 @@ int lprocfs_ofd_wr_grant_compat_disable(struct file *file, const char *buffer,
        if (val < 0)
                return -EINVAL;
 
-       cfs_spin_lock(&ofd->ofd_flags_lock);
+       spin_lock(&ofd->ofd_flags_lock);
        ofd->ofd_grant_compat_disable = !!val;
-       cfs_spin_unlock(&ofd->ofd_flags_lock);
+       spin_unlock(&ofd->ofd_flags_lock);
 
        return count;
 }
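
Every lproc_ofd.c write handler above has the same shape once converted: parse and validate the user value, then publish it under ofd_flags_lock so concurrent readers never observe a torn update of the bitfields. A sketch of one handler body under that assumption (hypothetical names, parsing elided):

#include <linux/spinlock.h>

struct demo_dev {
	spinlock_t	dd_flags_lock;	/* protects the bitfields below */
	unsigned long	dd_degraded:1;
};

static int demo_wr_degraded(struct demo_dev *dd, int val)
{
	spin_lock(&dd->dd_flags_lock);
	dd->dd_degraded = !!val;	/* normalize to 0/1 */
	spin_unlock(&dd->dd_flags_lock);
	return 0;
}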
index 6505442..291f772 100644 (file)
@@ -53,7 +53,7 @@ int ofd_update_capa_key(struct ofd_device *ofd, struct lustre_capa_key *new)
        struct filter_capa_key  *k, *keys[2] = { NULL, NULL };
        int                      i;
 
-       cfs_spin_lock(&capa_lock);
+       spin_lock(&capa_lock);
        cfs_list_for_each_entry(k, &obd->u.filter.fo_capa_keys, k_list) {
                if (k->k_key.lk_seq != new->lk_seq)
                        continue;
@@ -66,7 +66,7 @@ int ofd_update_capa_key(struct ofd_device *ofd, struct lustre_capa_key *new)
                        keys[0] = k;
                }
        }
-       cfs_spin_unlock(&capa_lock);
+       spin_unlock(&capa_lock);
 
        for (i = 0; i < 2; i++) {
                if (!keys[i])
@@ -76,9 +76,9 @@ int ofd_update_capa_key(struct ofd_device *ofd, struct lustre_capa_key *new)
                /* maybe because of recovery or other reasons, MDS sent the
                 * old capability key again.
                 */
-               cfs_spin_lock(&capa_lock);
+               spin_lock(&capa_lock);
                keys[i]->k_key = *new;
-               cfs_spin_unlock(&capa_lock);
+               spin_unlock(&capa_lock);
 
                RETURN(0);
        }
@@ -93,11 +93,11 @@ int ofd_update_capa_key(struct ofd_device *ofd, struct lustre_capa_key *new)
                CFS_INIT_LIST_HEAD(&k->k_list);
        }
 
-       cfs_spin_lock(&capa_lock);
+       spin_lock(&capa_lock);
        k->k_key = *new;
        if (cfs_list_empty(&k->k_list))
                cfs_list_add(&k->k_list, &obd->u.filter.fo_capa_keys);
-       cfs_spin_unlock(&capa_lock);
+       spin_unlock(&capa_lock);
 
        DEBUG_CAPA_KEY(D_SEC, new, "new");
        RETURN(0);
@@ -152,12 +152,12 @@ int ofd_auth_capa(struct obd_export *exp, struct lu_fid *fid, obd_seq seq,
 
        oc = capa_lookup(filter->fo_capa_hash, capa, 0);
        if (oc) {
-               cfs_spin_lock(&oc->c_lock);
+               spin_lock(&oc->c_lock);
                if (capa_is_expired(oc)) {
                        DEBUG_CAPA(D_ERROR, capa, "expired");
                        rc = -ESTALE;
                }
-               cfs_spin_unlock(&oc->c_lock);
+               spin_unlock(&oc->c_lock);
 
                capa_put(oc);
                RETURN(rc);
@@ -168,7 +168,7 @@ int ofd_auth_capa(struct obd_export *exp, struct lu_fid *fid, obd_seq seq,
                RETURN(-ESTALE);
        }
 
-       cfs_spin_lock(&capa_lock);
+       spin_lock(&capa_lock);
        cfs_list_for_each_entry(k, &filter->fo_capa_keys, k_list) {
                if (k->k_key.lk_seq == seq) {
                        keys_ready = 1;
@@ -179,7 +179,7 @@ int ofd_auth_capa(struct obd_export *exp, struct lu_fid *fid, obd_seq seq,
                        }
                }
        }
-       cfs_spin_unlock(&capa_lock);
+       spin_unlock(&capa_lock);
 
        if (!keys_ready) {
                CDEBUG(D_SEC, "MDS hasn't propagated capability keys yet, "
@@ -222,10 +222,10 @@ void ofd_free_capa_keys(struct ofd_device *ofd)
        struct obd_device       *obd = ofd_obd(ofd);
        struct filter_capa_key  *key, *n;
 
-       cfs_spin_lock(&capa_lock);
+       spin_lock(&capa_lock);
        cfs_list_for_each_entry_safe(key, n, &obd->u.filter.fo_capa_keys, k_list) {
                cfs_list_del_init(&key->k_list);
                OBD_FREE_PTR(key);
        }
-       cfs_spin_unlock(&capa_lock);
+       spin_unlock(&capa_lock);
 }
index b315be0..2ae11c3 100644 (file)
@@ -501,27 +501,27 @@ static int ofd_init0(const struct lu_env *env, struct ofd_device *m,
        m->ofd_fmd_max_num = OFD_FMD_MAX_NUM_DEFAULT;
        m->ofd_fmd_max_age = OFD_FMD_MAX_AGE_DEFAULT;
 
-       cfs_spin_lock_init(&m->ofd_flags_lock);
+       spin_lock_init(&m->ofd_flags_lock);
        m->ofd_raid_degraded = 0;
        m->ofd_syncjournal = 0;
        ofd_slc_set(m);
        m->ofd_grant_compat_disable = 0;
 
        /* statfs data */
-       cfs_spin_lock_init(&m->ofd_osfs_lock);
+       spin_lock_init(&m->ofd_osfs_lock);
        m->ofd_osfs_age = cfs_time_shift_64(-1000);
        m->ofd_osfs_unstable = 0;
        m->ofd_statfs_inflight = 0;
        m->ofd_osfs_inflight = 0;
 
        /* grant data */
-       cfs_spin_lock_init(&m->ofd_grant_lock);
+       spin_lock_init(&m->ofd_grant_lock);
        m->ofd_tot_dirty = 0;
        m->ofd_tot_granted = 0;
        m->ofd_tot_pending = 0;
        m->ofd_max_group = 0;
 
-       cfs_rwlock_init(&obd->u.filter.fo_sptlrpc_lock);
+       rwlock_init(&obd->u.filter.fo_sptlrpc_lock);
        sptlrpc_rule_set_init(&obd->u.filter.fo_sptlrpc_rset);
 
        obd->u.filter.fo_fl_oss_capa = 0;
@@ -575,7 +575,7 @@ static int ofd_init0(const struct lu_env *env, struct ofd_device *m,
                                obd->obd_name, osfs->os_bsize);
                GOTO(err_fini_stack, rc = -EPROTO);
        }
-       m->ofd_blockbits = cfs_fls(osfs->os_bsize) - 1;
+       m->ofd_blockbits = fls(osfs->os_bsize) - 1;
 
        snprintf(info->fti_u.name, sizeof(info->fti_u.name), "filter-%p", m);
        m->ofd_namespace = ldlm_namespace_new(obd, info->fti_u.name,
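
Besides rwlock_init(), this file picks up the unprefixed fls(): it returns the 1-based position of the most significant set bit (fls(0) == 0), so for a power-of-two block size fls(bsize) - 1 is exactly log2(bsize), the same value the log2(n) = ffz(~(n)) macro earlier in the patch computes from the other end of the word. A sketch:

#include <linux/bitops.h>

/* Derive the shift count for a power-of-two block size. */
static unsigned int demo_blockbits(unsigned int bsize)
{
	/* e.g. bsize == 4096: fls(4096) == 13, so blockbits == 12 */
	return fls(bsize) - 1;
}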
index 8663f9e..b23b713 100644 (file)
@@ -66,9 +66,9 @@ void ofd_fmd_put(struct obd_export *exp, struct ofd_mod_data *fmd)
        if (fmd == NULL)
                return;
 
-       cfs_spin_lock(&fed->fed_lock);
+       spin_lock(&fed->fed_lock);
        ofd_fmd_put_nolock(exp, fmd); /* caller reference */
-       cfs_spin_unlock(&fed->fed_lock);
+       spin_unlock(&fed->fed_lock);
 }
 
 /* expire entries from the end of the list if there are too many
@@ -99,9 +99,9 @@ void ofd_fmd_expire(struct obd_export *exp)
 {
        struct filter_export_data *fed = &exp->exp_filter_data;
 
-       cfs_spin_lock(&fed->fed_lock);
+       spin_lock(&fed->fed_lock);
        ofd_fmd_expire_nolock(exp, NULL);
-       cfs_spin_unlock(&fed->fed_lock);
+       spin_unlock(&fed->fed_lock);
 }
 
 /* find specified fid in fed_fmd_list.
@@ -139,11 +139,11 @@ struct ofd_mod_data *ofd_fmd_find(struct obd_export *exp,
        struct filter_export_data       *fed = &exp->exp_filter_data;
        struct ofd_mod_data             *fmd;
 
-       cfs_spin_lock(&fed->fed_lock);
+       spin_lock(&fed->fed_lock);
        fmd = ofd_fmd_find_nolock(exp, fid);
        if (fmd)
                fmd->fmd_refcount++;    /* caller reference */
-       cfs_spin_unlock(&fed->fed_lock);
+       spin_unlock(&fed->fed_lock);
 
        return fmd;
 }
@@ -163,7 +163,7 @@ struct ofd_mod_data *ofd_fmd_get(struct obd_export *exp, struct lu_fid *fid)
 
        OBD_SLAB_ALLOC_PTR(fmd_new, ll_fmd_cachep);
 
-       cfs_spin_lock(&fed->fed_lock);
+       spin_lock(&fed->fed_lock);
        found = ofd_fmd_find_nolock(exp, fid);
        if (fmd_new) {
                if (found == NULL) {
@@ -182,7 +182,7 @@ struct ofd_mod_data *ofd_fmd_get(struct obd_export *exp, struct lu_fid *fid)
                found->fmd_expire = cfs_time_add(now, ofd->ofd_fmd_max_age);
        }
 
-       cfs_spin_unlock(&fed->fed_lock);
+       spin_unlock(&fed->fed_lock);
 
        return found;
 }
@@ -197,13 +197,13 @@ void ofd_fmd_drop(struct obd_export *exp, struct lu_fid *fid)
        struct filter_export_data       *fed = &exp->exp_filter_data;
        struct ofd_mod_data             *found = NULL;
 
-       cfs_spin_lock(&fed->fed_lock);
+       spin_lock(&fed->fed_lock);
        found = ofd_fmd_find_nolock(exp, fid);
        if (found) {
                cfs_list_del_init(&found->fmd_list);
                ofd_fmd_put_nolock(exp, found);
        }
-       cfs_spin_unlock(&fed->fed_lock);
+       spin_unlock(&fed->fed_lock);
 }
 #endif
 
@@ -213,7 +213,7 @@ void ofd_fmd_cleanup(struct obd_export *exp)
        struct filter_export_data       *fed = &exp->exp_filter_data;
        struct ofd_mod_data             *fmd = NULL, *tmp;
 
-       cfs_spin_lock(&fed->fed_lock);
+       spin_lock(&fed->fed_lock);
        cfs_list_for_each_entry_safe(fmd, tmp, &fed->fed_mod_list, fmd_list) {
                cfs_list_del_init(&fmd->fmd_list);
                if (fmd->fmd_refcount > 1) {
@@ -222,7 +222,7 @@ void ofd_fmd_cleanup(struct obd_export *exp)
                }
                ofd_fmd_put_nolock(exp, fmd);
        }
-       cfs_spin_unlock(&fed->fed_lock);
+       spin_unlock(&fed->fed_lock);
 }
 
 int ofd_fmd_init(void)
index 758f4a2..0d881dc 100644 (file)
@@ -72,9 +72,9 @@ int ofd_precreate_batch(struct ofd_device *ofd, int batch)
 {
        int count;
 
-       cfs_spin_lock(&ofd->ofd_objid_lock);
+       spin_lock(&ofd->ofd_objid_lock);
        count = min(ofd->ofd_precreate_batch, batch);
-       cfs_spin_unlock(&ofd->ofd_objid_lock);
+       spin_unlock(&ofd->ofd_objid_lock);
 
        return count;
 }
@@ -85,9 +85,9 @@ obd_id ofd_last_id(struct ofd_device *ofd, obd_seq group)
 
        LASSERT(group <= ofd->ofd_max_group);
 
-       cfs_spin_lock(&ofd->ofd_objid_lock);
+       spin_lock(&ofd->ofd_objid_lock);
        id = ofd->ofd_last_objids[group];
-       cfs_spin_unlock(&ofd->ofd_objid_lock);
+       spin_unlock(&ofd->ofd_objid_lock);
 
        return id;
 }
@@ -95,10 +95,10 @@ obd_id ofd_last_id(struct ofd_device *ofd, obd_seq group)
 void ofd_last_id_set(struct ofd_device *ofd, obd_id id, obd_seq group)
 {
        LASSERT(group <= ofd->ofd_max_group);
-       cfs_spin_lock(&ofd->ofd_objid_lock);
+       spin_lock(&ofd->ofd_objid_lock);
        if (ofd->ofd_last_objids[group] < id)
                ofd->ofd_last_objids[group] = id;
-       cfs_spin_unlock(&ofd->ofd_objid_lock);
+       spin_unlock(&ofd->ofd_objid_lock);
 }
 
 int ofd_last_id_write(const struct lu_env *env, struct ofd_device *ofd,
@@ -179,7 +179,7 @@ int ofd_group_load(const struct lu_env *env, struct ofd_device *ofd, int group)
                RETURN(PTR_ERR(dob));
 
        ofd->ofd_lastid_obj[group] = dob;
-       cfs_mutex_init(&ofd->ofd_create_locks[group]);
+       mutex_init(&ofd->ofd_create_locks[group]);
 
        rc = dt_attr_get(env, dob, &info->fti_attr, BYPASS_CAPA);
        if (rc)
@@ -225,7 +225,7 @@ int ofd_groups_init(const struct lu_env *env, struct ofd_device *ofd)
 
        ENTRY;
 
-       cfs_spin_lock_init(&ofd->ofd_objid_lock);
+       spin_lock_init(&ofd->ofd_objid_lock);
 
        rc = dt_attr_get(env, ofd->ofd_last_group_file,
                         &info->fti_attr, BYPASS_CAPA);
@@ -340,10 +340,10 @@ int ofd_clients_data_init(const struct lu_env *env, struct ofd_device *ofd,
                LASSERTF(rc == 0, "rc = %d\n", rc); /* can't fail existing */
                /* VBR: set export last committed version */
                exp->exp_last_committed = last_rcvd;
-               cfs_spin_lock(&exp->exp_lock);
+               spin_lock(&exp->exp_lock);
                exp->exp_connecting = 0;
                exp->exp_in_recovery = 0;
-               cfs_spin_unlock(&exp->exp_lock);
+               spin_unlock(&exp->exp_lock);
                obd->obd_max_recoverable_clients++;
                class_export_put(exp);
 
@@ -351,10 +351,10 @@ int ofd_clients_data_init(const struct lu_env *env, struct ofd_device *ofd,
                CDEBUG(D_OTHER, "client at idx %d has last_rcvd = "LPU64"\n",
                       cl_idx, last_rcvd);
 
-               cfs_spin_lock(&ofd->ofd_lut.lut_translock);
+               spin_lock(&ofd->ofd_lut.lut_translock);
                if (last_rcvd > lsd->lsd_last_transno)
                        lsd->lsd_last_transno = last_rcvd;
-               cfs_spin_unlock(&ofd->ofd_lut.lut_translock);
+               spin_unlock(&ofd->ofd_lut.lut_translock);
        }
 
 err_out:
@@ -451,10 +451,10 @@ int ofd_server_data_init(const struct lu_env *env, struct ofd_device *ofd)
 
        rc = ofd_clients_data_init(env, ofd, last_rcvd_size);
 
-       cfs_spin_lock(&ofd->ofd_lut.lut_translock);
+       spin_lock(&ofd->ofd_lut.lut_translock);
        obd->obd_last_committed = lsd->lsd_last_transno;
        ofd->ofd_lut.lut_last_transno = lsd->lsd_last_transno;
-       cfs_spin_unlock(&ofd->ofd_lut.lut_translock);
+       spin_unlock(&ofd->ofd_lut.lut_translock);
 
        /* save it, so mount count and last_transno is current */
        rc = tgt_server_data_update(env, &ofd->ofd_lut, 0);
index 6d85215..03b3e25 100644 (file)
@@ -108,8 +108,8 @@ void ofd_grant_sanity_check(struct obd_device *obd, const char *func)
 
        maxsize = ofd->ofd_osfs.os_blocks << ofd->ofd_blockbits;
 
-       cfs_spin_lock(&obd->obd_dev_lock);
-       cfs_spin_lock(&ofd->ofd_grant_lock);
+       spin_lock(&obd->obd_dev_lock);
+       spin_lock(&ofd->ofd_grant_lock);
        cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
                int error = 0;
 
@@ -128,16 +128,16 @@ void ofd_grant_sanity_check(struct obd_device *obd, const char *func)
                               " > maxsize("LPU64")\n", obd->obd_name,
                               exp->exp_client_uuid.uuid, exp, fed->fed_grant,
                               fed->fed_pending, maxsize);
-                       cfs_spin_unlock(&obd->obd_dev_lock);
-                       cfs_spin_unlock(&ofd->ofd_grant_lock);
+                       spin_unlock(&obd->obd_dev_lock);
+                       spin_unlock(&ofd->ofd_grant_lock);
                        LBUG();
                }
                if (fed->fed_dirty > maxsize) {
                        CERROR("%s: cli %s/%p fed_dirty(%ld) > maxsize("LPU64
                               ")\n", obd->obd_name, exp->exp_client_uuid.uuid,
                               exp, fed->fed_dirty, maxsize);
-                       cfs_spin_unlock(&obd->obd_dev_lock);
-                       cfs_spin_unlock(&ofd->ofd_grant_lock);
+                       spin_unlock(&obd->obd_dev_lock);
+                       spin_unlock(&ofd->ofd_grant_lock);
                        LBUG();
                }
                CDEBUG_LIMIT(error ? D_ERROR : D_CACHE, "%s: cli %s/%p dirty "
@@ -148,7 +148,7 @@ void ofd_grant_sanity_check(struct obd_device *obd, const char *func)
                tot_pending += fed->fed_pending;
                tot_dirty += fed->fed_dirty;
        }
-       cfs_spin_unlock(&obd->obd_dev_lock);
+       spin_unlock(&obd->obd_dev_lock);
        fo_tot_granted = ofd->ofd_tot_granted;
        fo_tot_pending = ofd->ofd_tot_pending;
        fo_tot_dirty = ofd->ofd_tot_dirty;
@@ -171,7 +171,7 @@ void ofd_grant_sanity_check(struct obd_device *obd, const char *func)
        if (tot_dirty > maxsize)
                CERROR("%s: tot_dirty "LPU64" > maxsize "LPU64"\n",
                       func, tot_dirty, maxsize);
-       cfs_spin_unlock(&ofd->ofd_grant_lock);
+       spin_unlock(&ofd->ofd_grant_lock);
 }
 
 /**
@@ -230,11 +230,11 @@ static obd_size ofd_grant_space_left(struct obd_export *exp)
        ENTRY;
        LASSERT_SPIN_LOCKED(&ofd->ofd_grant_lock);
 
-       cfs_spin_lock(&ofd->ofd_osfs_lock);
+       spin_lock(&ofd->ofd_osfs_lock);
        /* get available space from cached statfs data */
        left = ofd->ofd_osfs.os_bavail << ofd->ofd_blockbits;
        unstable = ofd->ofd_osfs_unstable; /* those might be accounted twice */
-       cfs_spin_unlock(&ofd->ofd_osfs_lock);
+       spin_unlock(&ofd->ofd_osfs_lock);
 
        tot_granted = ofd->ofd_tot_granted;
 
@@ -345,7 +345,7 @@ static void ofd_grant_incoming(const struct lu_env *env, struct obd_export *exp,
                CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
                       obd->obd_name, exp->exp_client_uuid.uuid, exp,
                       fed->fed_dirty, fed->fed_pending, fed->fed_grant);
-               cfs_spin_unlock(&ofd->ofd_grant_lock);
+               spin_unlock(&ofd->ofd_grant_lock);
                LBUG();
        }
        EXIT;
@@ -565,7 +565,7 @@ static void ofd_grant_check(const struct lu_env *env, struct obd_export *exp,
                CERROR("%s: cli %s/%p dirty %ld pend %ld grant %ld\n",
                       obd->obd_name, exp->exp_client_uuid.uuid, exp,
                       fed->fed_dirty, fed->fed_pending, fed->fed_grant);
-               cfs_spin_unlock(&ofd->ofd_grant_lock);
+               spin_unlock(&ofd->ofd_grant_lock);
                LBUG();
        }
        EXIT;
@@ -642,7 +642,7 @@ static long ofd_grant(struct obd_export *exp, obd_size curgrant,
                CERROR("%s: cli %s/%p grant %ld want "LPU64" current "LPU64"\n",
                       obd->obd_name, exp->exp_client_uuid.uuid, exp,
                       fed->fed_grant, want, curgrant);
-               cfs_spin_unlock(&ofd->ofd_grant_lock);
+               spin_unlock(&ofd->ofd_grant_lock);
                LBUG();
        }
 
@@ -684,7 +684,7 @@ long ofd_grant_connect(const struct lu_env *env, struct obd_export *exp,
 refresh:
        ofd_grant_statfs(env, exp, force, &from_cache);
 
-       cfs_spin_lock(&ofd->ofd_grant_lock);
+       spin_lock(&ofd->ofd_grant_lock);
 
        /* Grab free space from cached info and take out space already granted
         * to clients as well as reserved space */
@@ -692,7 +692,7 @@ refresh:
 
        /* get fresh statfs data if we are short in ungranted space */
        if (from_cache && left < 32 * ofd_grant_chunk(exp, ofd)) {
-               cfs_spin_unlock(&ofd->ofd_grant_lock);
+               spin_unlock(&ofd->ofd_grant_lock);
                CDEBUG(D_CACHE, "fs has no space left and statfs too old\n");
                force = 1;
                goto refresh;
@@ -705,7 +705,7 @@ refresh:
        grant = ofd_grant_to_cli(exp, ofd, (obd_size)fed->fed_grant);
        ofd->ofd_tot_granted_clients++;
 
-       cfs_spin_unlock(&ofd->ofd_grant_lock);
+       spin_unlock(&ofd->ofd_grant_lock);
 
        CDEBUG(D_CACHE, "%s: cli %s/%p ocd_grant: %ld want: "LPU64" left: "
               LPU64"\n", exp->exp_obd->obd_name, exp->exp_client_uuid.uuid,
@@ -728,7 +728,7 @@ void ofd_grant_discard(struct obd_export *exp)
        struct ofd_device               *ofd = ofd_exp(exp);
        struct filter_export_data       *fed = &exp->exp_filter_data;
 
-       cfs_spin_lock(&ofd->ofd_grant_lock);
+       spin_lock(&ofd->ofd_grant_lock);
        LASSERTF(ofd->ofd_tot_granted >= fed->fed_grant,
                 "%s: tot_granted "LPU64" cli %s/%p fed_grant %ld\n",
                 obd->obd_name, ofd->ofd_tot_granted,
@@ -747,7 +747,7 @@ void ofd_grant_discard(struct obd_export *exp)
                 exp->exp_client_uuid.uuid, exp, fed->fed_dirty);
        ofd->ofd_tot_dirty -= fed->fed_dirty;
        fed->fed_dirty = 0;
-       cfs_spin_unlock(&ofd->ofd_grant_lock);
+       spin_unlock(&ofd->ofd_grant_lock);
 }
 
 /**
@@ -783,7 +783,7 @@ void ofd_grant_prepare_read(const struct lu_env *env,
                ofd_grant_statfs(env, exp, 1, NULL);
 
                /* protect all grant counters */
-               cfs_spin_lock(&ofd->ofd_grant_lock);
+               spin_lock(&ofd->ofd_grant_lock);
 
                /* Grab free space from cached statfs data and take out space
                 * already granted to clients as well as reserved space */
@@ -796,7 +796,7 @@ void ofd_grant_prepare_read(const struct lu_env *env,
                 * since we don't grant space back on reads, no point
                 * in running statfs, so just skip it and process
                 * incoming grant data directly. */
-               cfs_spin_lock(&ofd->ofd_grant_lock);
+               spin_lock(&ofd->ofd_grant_lock);
                do_shrink = 0;
        }
 
@@ -810,7 +810,7 @@ void ofd_grant_prepare_read(const struct lu_env *env,
        else
                oa->o_grant = 0;
 
-       cfs_spin_unlock(&ofd->ofd_grant_lock);
+       spin_unlock(&ofd->ofd_grant_lock);
 }
 
 /**
@@ -840,7 +840,7 @@ refresh:
        /* get statfs information from OSD layer */
        ofd_grant_statfs(env, exp, force, &from_cache);
 
-       cfs_spin_lock(&ofd->ofd_grant_lock); /* protect all grant counters */
+       spin_lock(&ofd->ofd_grant_lock); /* protect all grant counters */
 
        /* Grab free space from cached statfs data and take out space already
         * granted to clients as well as reserved space */
@@ -848,7 +848,7 @@ refresh:
 
        /* Get fresh statfs data if we are short in ungranted space */
        if (from_cache && left < 32 * ofd_grant_chunk(exp, ofd)) {
-               cfs_spin_unlock(&ofd->ofd_grant_lock);
+               spin_unlock(&ofd->ofd_grant_lock);
                CDEBUG(D_CACHE, "%s: fs has no space left and statfs too old\n",
                       obd->obd_name);
                force = 1;
@@ -873,7 +873,7 @@ refresh:
                if (!from_grant) {
                        /* at least one network buffer requires acquiring grant
                         * space on the server */
-                       cfs_spin_unlock(&ofd->ofd_grant_lock);
+                       spin_unlock(&ofd->ofd_grant_lock);
                        /* discard errors, at least we tried ... */
                        rc = dt_sync(env, ofd->ofd_osd);
                        force = 2;
@@ -888,7 +888,7 @@ refresh:
        ofd_grant_check(env, exp, oa, rnb, niocount, &left);
 
        if (!(oa->o_valid & OBD_MD_FLGRANT)) {
-               cfs_spin_unlock(&ofd->ofd_grant_lock);
+               spin_unlock(&ofd->ofd_grant_lock);
                RETURN_EXIT;
        }
 
@@ -900,7 +900,7 @@ refresh:
        else
                /* grant more space back to the client if possible */
                oa->o_grant = ofd_grant(exp, oa->o_grant, oa->o_undirty, left);
-       cfs_spin_unlock(&ofd->ofd_grant_lock);
+       spin_unlock(&ofd->ofd_grant_lock);
 }
 
 /**
@@ -933,13 +933,13 @@ int ofd_grant_create(const struct lu_env *env, struct obd_export *exp, int *nr)
        ofd_grant_statfs(env, exp, 1, NULL);
 
        /* protect all grant counters */
-       cfs_spin_lock(&ofd->ofd_grant_lock);
+       spin_lock(&ofd->ofd_grant_lock);
 
        /* fail precreate request if there is not enough blocks available for
         * writing */
        if (ofd->ofd_osfs.os_bavail - (fed->fed_grant >> ofd->ofd_blockbits) <
            (ofd->ofd_osfs.os_blocks >> 10)) {
-               cfs_spin_unlock(&ofd->ofd_grant_lock);
+               spin_unlock(&ofd->ofd_grant_lock);
                CDEBUG(D_RPCTRACE, "%s: not enough space for create "LPU64"\n",
                       ofd_obd(ofd)->obd_name,
                       ofd->ofd_osfs.os_bavail * ofd->ofd_osfs.os_blocks);
@@ -961,7 +961,7 @@ int ofd_grant_create(const struct lu_env *env, struct obd_export *exp, int *nr)
                if (*nr == 0) {
                        /* we really have no space any more for precreation,
                         * fail the precreate request with ENOSPC */
-                       cfs_spin_unlock(&ofd->ofd_grant_lock);
+                       spin_unlock(&ofd->ofd_grant_lock);
                        RETURN(-ENOSPC);
                }
                /* compute space needed for the new number of creations */
@@ -985,7 +985,7 @@ int ofd_grant_create(const struct lu_env *env, struct obd_export *exp, int *nr)
        /* grant more space (twice as much as needed for this request) for
         * precreate purpose if possible */
        ofd_grant(exp, fed->fed_grant, wanted * 2, left);
-       cfs_spin_unlock(&ofd->ofd_grant_lock);
+       spin_unlock(&ofd->ofd_grant_lock);
        RETURN(0);
 }
 
@@ -1010,7 +1010,7 @@ void ofd_grant_commit(const struct lu_env *env, struct obd_export *exp,
        if (pending == 0)
                RETURN_EXIT;
 
-       cfs_spin_lock(&ofd->ofd_grant_lock);
+       spin_lock(&ofd->ofd_grant_lock);
        /* Don't update statfs data for errors raised before commit (e.g.
         * bulk transfer failed, ...) since we know those writes have not been
         * processed. For other errors hit during commit, we cannot really tell
@@ -1018,7 +1018,7 @@ void ofd_grant_commit(const struct lu_env *env, struct obd_export *exp,
         * In any case, this should not be fatal since we always get fresh
         * statfs data before failing a request with ENOSPC */
        if (rc == 0) {
-               cfs_spin_lock(&ofd->ofd_osfs_lock);
+               spin_lock(&ofd->ofd_osfs_lock);
                /* Take pending out of cached statfs data */
                ofd->ofd_osfs.os_bavail -= min_t(obd_size,
                                                 ofd->ofd_osfs.os_bavail,
@@ -1027,14 +1027,14 @@ void ofd_grant_commit(const struct lu_env *env, struct obd_export *exp,
                        /* someone is running statfs and wants to be notified of
                         * writes happening meanwhile */
                        ofd->ofd_osfs_inflight += pending;
-               cfs_spin_unlock(&ofd->ofd_osfs_lock);
+               spin_unlock(&ofd->ofd_osfs_lock);
        }
 
        if (exp->exp_filter_data.fed_pending < pending) {
                CERROR("%s: cli %s/%p fed_pending(%lu) < grant_used(%lu)\n",
                       exp->exp_obd->obd_name, exp->exp_client_uuid.uuid, exp,
                       exp->exp_filter_data.fed_pending, pending);
-               cfs_spin_unlock(&ofd->ofd_grant_lock);
+               spin_unlock(&ofd->ofd_grant_lock);
                LBUG();
        }
        exp->exp_filter_data.fed_pending -= pending;
@@ -1044,7 +1044,7 @@ void ofd_grant_commit(const struct lu_env *env, struct obd_export *exp,
                        "\n", exp->exp_obd->obd_name,
                        exp->exp_client_uuid.uuid, exp, ofd->ofd_tot_granted,
                        pending);
-               cfs_spin_unlock(&ofd->ofd_grant_lock);
+               spin_unlock(&ofd->ofd_grant_lock);
                LBUG();
        }
        ofd->ofd_tot_granted -= pending;
@@ -1053,10 +1053,10 @@ void ofd_grant_commit(const struct lu_env *env, struct obd_export *exp,
                 CERROR("%s: cli %s/%p tot_pending("LPU64") < grant_used(%lu)"
                        "\n", exp->exp_obd->obd_name, exp->exp_client_uuid.uuid,
                        exp, ofd->ofd_tot_pending, pending);
-               cfs_spin_unlock(&ofd->ofd_grant_lock);
+               spin_unlock(&ofd->ofd_grant_lock);
                LBUG();
        }
        ofd->ofd_tot_pending -= pending;
-       cfs_spin_unlock(&ofd->ofd_grant_lock);
+       spin_unlock(&ofd->ofd_grant_lock);
        EXIT;
 }
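
ofd_grant_sanity_check() above nests two of the converted spinlocks, taking obd_dev_lock before ofd_grant_lock at every acquisition site; with lockdep now seeing the real kernel primitives, any site acquiring them in the opposite order would be flagged as a potential ABBA deadlock. A sketch of the convention (hypothetical names):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_dev_lock);		/* outer: export list */
static DEFINE_SPINLOCK(demo_grant_lock);	/* inner: grant counters */

static void demo_sanity_check(void)
{
	spin_lock(&demo_dev_lock);	/* always the outer lock first ... */
	spin_lock(&demo_grant_lock);	/* ... then the inner one */
	/* ... walk exports, sum per-client grants ... */
	spin_unlock(&demo_grant_lock);
	spin_unlock(&demo_dev_lock);	/* unlock order is not significant */
}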
index 827834f..45b8e3d 100644 (file)
@@ -111,14 +111,14 @@ struct ofd_device {
 
        int                      ofd_max_group;
        obd_id                   ofd_last_objids[OFD_MAX_GROUPS];
-       cfs_mutex_t              ofd_create_locks[OFD_MAX_GROUPS];
+       struct mutex             ofd_create_locks[OFD_MAX_GROUPS];
        struct dt_object        *ofd_lastid_obj[OFD_MAX_GROUPS];
-       cfs_spinlock_t           ofd_objid_lock;
+       spinlock_t               ofd_objid_lock;
        unsigned long            ofd_destroys_in_progress;
        int                      ofd_precreate_batch;
 
        /* protect all statfs-related counters */
-       cfs_spinlock_t           ofd_osfs_lock;
+       spinlock_t               ofd_osfs_lock;
        /* statfs optimization: we cache a bit  */
        struct obd_statfs        ofd_osfs;
        __u64                    ofd_osfs_age;
@@ -135,7 +135,7 @@ struct ofd_device {
 
        /* grants: all values in bytes */
        /* grant lock to protect all grant counters */
-       cfs_spinlock_t           ofd_grant_lock;
+       spinlock_t               ofd_grant_lock;
        /* total amount of dirty data reported by clients in incoming obdo */
        obd_size                 ofd_tot_dirty;
        /* sum of filesystem space granted to clients for async writes */
@@ -154,7 +154,7 @@ struct ofd_device {
        int                      ofd_fmd_max_num; /* per ofd ofd_mod_data */
        cfs_duration_t           ofd_fmd_max_age; /* time to fmd expiry */
 
-       cfs_spinlock_t           ofd_flags_lock;
+       spinlock_t               ofd_flags_lock;
        unsigned long            ofd_raid_degraded:1,
                                 /* sync journal on writes */
                                 ofd_syncjournal:1,
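
The ofd_internal.h changes are the type-level half of the conversion: cfs_spinlock_t becomes spinlock_t and cfs_mutex_t becomes struct mutex, embedded directly in struct ofd_device. Embedded locks have no static initializer, so each must be initialized at runtime before first use, which is what the spin_lock_init()/mutex_init() calls in ofd_init0() and ofd_group_load() do. A sketch (hypothetical names):

#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct demo_dev {
	spinlock_t	dd_objid_lock;	/* protects dd_last_objid */
	u64		dd_last_objid;
	struct mutex	dd_create_lock;	/* serializes precreates */
};

static void demo_dev_init(struct demo_dev *dd)
{
	spin_lock_init(&dd->dd_objid_lock);	/* no static initializer here */
	mutex_init(&dd->dd_create_lock);
}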
index 471b541..5242519 100644 (file)
@@ -80,7 +80,7 @@ static int ofd_export_stats_init(struct ofd_device *ofd,
                GOTO(clean, rc = -ENOMEM);
 
        for (i = 0; i < BRW_LAST; i++)
-               cfs_spin_lock_init(&stats->nid_brw_stats->hist[i].oh_lock);
+               spin_lock_init(&stats->nid_brw_stats->hist[i].oh_lock);
 
        rc = lprocfs_seq_create(stats->nid_proc, "brw_stats", 0644,
                                &ofd_per_nid_stats_fops, stats);
@@ -366,11 +366,11 @@ static int ofd_init_export(struct obd_export *exp)
 {
        int rc;
 
-       cfs_spin_lock_init(&exp->exp_filter_data.fed_lock);
+       spin_lock_init(&exp->exp_filter_data.fed_lock);
        CFS_INIT_LIST_HEAD(&exp->exp_filter_data.fed_mod_list);
-       cfs_spin_lock(&exp->exp_lock);
+       spin_lock(&exp->exp_lock);
        exp->exp_connecting = 1;
-       cfs_spin_unlock(&exp->exp_lock);
+       spin_unlock(&exp->exp_lock);
 
        /* self-export doesn't need client data and ldlm initialization */
        if (unlikely(obd_uuid_equals(&exp->exp_obd->obd_uuid,
@@ -469,10 +469,10 @@ static int ofd_adapt_sptlrpc_conf(const struct lu_env *env,
 
        sptlrpc_target_update_exp_flavor(obd, &tmp_rset);
 
-       cfs_write_lock(&fo->fo_sptlrpc_lock);
+       write_lock(&fo->fo_sptlrpc_lock);
        sptlrpc_rule_set_free(&fo->fo_sptlrpc_rset);
        fo->fo_sptlrpc_rset = tmp_rset;
-       cfs_write_unlock(&fo->fo_sptlrpc_lock);
+       write_unlock(&fo->fo_sptlrpc_lock);
 
        return 0;
 }
@@ -620,7 +620,7 @@ int ofd_statfs_internal(const struct lu_env *env, struct ofd_device *ofd,
 {
        int rc;
 
-       cfs_spin_lock(&ofd->ofd_osfs_lock);
+       spin_lock(&ofd->ofd_osfs_lock);
        if (cfs_time_before_64(ofd->ofd_osfs_age, max_age) || max_age == 0) {
                obd_size unstable;
 
@@ -639,7 +639,7 @@ int ofd_statfs_internal(const struct lu_env *env, struct ofd_device *ofd,
                /* record value of inflight counter before running statfs to
                 * compute the diff once statfs is completed */
                unstable = ofd->ofd_osfs_inflight;
-               cfs_spin_unlock(&ofd->ofd_osfs_lock);
+               spin_unlock(&ofd->ofd_osfs_lock);
 
                /* statfs can sleep ... hopefully not for too long since we can
                 * call it fairly often as space fills up */
@@ -647,8 +647,8 @@ int ofd_statfs_internal(const struct lu_env *env, struct ofd_device *ofd,
                if (unlikely(rc))
                        return rc;
 
-               cfs_spin_lock(&ofd->ofd_grant_lock);
-               cfs_spin_lock(&ofd->ofd_osfs_lock);
+               spin_lock(&ofd->ofd_grant_lock);
+               spin_lock(&ofd->ofd_osfs_lock);
                /* calculate how much space was written while we released the
                 * ofd_osfs_lock */
                unstable = ofd->ofd_osfs_inflight - unstable;
@@ -672,7 +672,7 @@ int ofd_statfs_internal(const struct lu_env *env, struct ofd_device *ofd,
                /* similarly, there is some uncertainty on write requests
                 * between prepare & commit */
                ofd->ofd_osfs_unstable += ofd->ofd_tot_pending;
-               cfs_spin_unlock(&ofd->ofd_grant_lock);
+               spin_unlock(&ofd->ofd_grant_lock);
 
                /* finally update cached statfs data */
                ofd->ofd_osfs = *osfs;
@@ -681,14 +681,14 @@ int ofd_statfs_internal(const struct lu_env *env, struct ofd_device *ofd,
                ofd->ofd_statfs_inflight--; /* stop tracking */
                if (ofd->ofd_statfs_inflight == 0)
                        ofd->ofd_osfs_inflight = 0;
-               cfs_spin_unlock(&ofd->ofd_osfs_lock);
+               spin_unlock(&ofd->ofd_osfs_lock);
 
                if (from_cache)
                        *from_cache = 0;
        } else {
                /* use cached statfs data */
                *osfs = ofd->ofd_osfs;
-               cfs_spin_unlock(&ofd->ofd_osfs_lock);
+               spin_unlock(&ofd->ofd_osfs_lock);
                if (from_cache)
                        *from_cache = 1;
        }
@@ -1135,9 +1135,9 @@ int ofd_create(const struct lu_env *env, struct obd_export *exp,
                        GOTO(out_nolock, rc = 0);
                }
                /* This causes inflight precreates to abort and drop lock */
-               cfs_set_bit(oa->o_seq, &ofd->ofd_destroys_in_progress);
-               cfs_mutex_lock(&ofd->ofd_create_locks[oa->o_seq]);
-               if (!cfs_test_bit(oa->o_seq, &ofd->ofd_destroys_in_progress)) {
+               set_bit(oa->o_seq, &ofd->ofd_destroys_in_progress);
+               mutex_lock(&ofd->ofd_create_locks[oa->o_seq]);
+               if (!test_bit(oa->o_seq, &ofd->ofd_destroys_in_progress)) {
                        CERROR("%s:["LPU64"] destroys_in_progress already cleared\n",
                               exp->exp_obd->obd_name, oa->o_seq);
                        GOTO(out, rc = 0);
@@ -1150,13 +1150,13 @@ int ofd_create(const struct lu_env *env, struct obd_export *exp,
                        rc = 0;
                } else if (diff < 0) {
                        rc = ofd_orphans_destroy(env, exp, ofd, oa);
-                       cfs_clear_bit(oa->o_seq, &ofd->ofd_destroys_in_progress);
+                       clear_bit(oa->o_seq, &ofd->ofd_destroys_in_progress);
                } else {
                        /* XXX: Used by MDS for the first time! */
-                       cfs_clear_bit(oa->o_seq, &ofd->ofd_destroys_in_progress);
+                       clear_bit(oa->o_seq, &ofd->ofd_destroys_in_progress);
                }
        } else {
-               cfs_mutex_lock(&ofd->ofd_create_locks[oa->o_seq]);
+               mutex_lock(&ofd->ofd_create_locks[oa->o_seq]);
                if (oti->oti_conn_cnt < exp->exp_conn_cnt) {
                        CERROR("%s: dropping old precreate request\n",
                               ofd_obd(ofd)->obd_name);
@@ -1235,7 +1235,7 @@ int ofd_create(const struct lu_env *env, struct obd_export *exp,
 
        ofd_info2oti(info, oti);
 out:
-       cfs_mutex_unlock(&ofd->ofd_create_locks[oa->o_seq]);
+       mutex_unlock(&ofd->ofd_create_locks[oa->o_seq]);
 out_nolock:
        if (rc == 0 && ea != NULL) {
                struct lov_stripe_md *lsm = *ea;
@@ -1462,9 +1462,9 @@ static int ofd_obd_notify(struct obd_device *obd, struct obd_device *unused,
        switch (ev) {
        case OBD_NOTIFY_CONFIG:
                LASSERT(obd->obd_no_conn);
-               cfs_spin_lock(&obd->obd_dev_lock);
+               spin_lock(&obd->obd_dev_lock);
                obd->obd_no_conn = 0;
-               cfs_spin_unlock(&obd->obd_dev_lock);
+               spin_unlock(&obd->obd_dev_lock);
                break;
        default:
                CDEBUG(D_INFO, "%s: Unhandled notification %#x\n",
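
ofd_create() above also converts the atomic bit operations: cfs_set_bit/cfs_test_bit/cfs_clear_bit become set_bit()/test_bit()/clear_bit(), which perform atomic read-modify-write on an unsigned long bitmap, here ofd_destroys_in_progress indexed by object sequence. A sketch of that handshake (hypothetical names):

#include <linux/bitops.h>

static unsigned long demo_destroys_in_progress;	/* one bit per group */

static void demo_begin_destroy(int grp)
{
	set_bit(grp, &demo_destroys_in_progress);	/* atomic RMW */
}

static int demo_destroy_pending(int grp)
{
	return test_bit(grp, &demo_destroys_in_progress);
}

static void demo_end_destroy(int grp)
{
	clear_bit(grp, &demo_destroys_in_progress);
}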
index 8ff3c0d..8191c00 100644 (file)
@@ -61,9 +61,9 @@ int ofd_version_get_check(struct ofd_thread_info *info,
            info->fti_pre_version != curr_version) {
                CDEBUG(D_INODE, "Version mismatch "LPX64" != "LPX64"\n",
                       info->fti_pre_version, curr_version);
-               cfs_spin_lock(&info->fti_exp->exp_lock);
+               spin_lock(&info->fti_exp->exp_lock);
                info->fti_exp->exp_vbr_failed = 1;
-               cfs_spin_unlock(&info->fti_exp->exp_lock);
+               spin_unlock(&info->fti_exp->exp_lock);
                RETURN (-EOVERFLOW);
        }
        info->fti_pre_version = curr_version;
index bcd8c7a..37b2a1a 100644 (file)
@@ -153,10 +153,10 @@ static int ofd_last_rcvd_update(struct ofd_thread_info *info,
         */
        if (info->fti_transno == 0 &&
            *transno_p == ofd->ofd_lut.lut_last_transno) {
-               cfs_spin_lock(&ofd->ofd_lut.lut_translock);
+               spin_lock(&ofd->ofd_lut.lut_translock);
                ofd->ofd_lut.lut_lsd.lsd_last_transno =
                                                ofd->ofd_lut.lut_last_transno;
-               cfs_spin_unlock(&ofd->ofd_lut.lut_translock);
+               spin_unlock(&ofd->ofd_lut.lut_translock);
                tgt_server_data_write(info->fti_env, &ofd->ofd_lut, th);
        }
 
@@ -172,12 +172,12 @@ static int ofd_last_rcvd_update(struct ofd_thread_info *info,
                err = 0;
                /* All operations performed by LW clients are synchronous and
                 * we store the committed transno in the last_rcvd header */
-               cfs_spin_lock(&tg->lut_translock);
+               spin_lock(&tg->lut_translock);
                if (info->fti_transno > tg->lut_lsd.lsd_last_transno) {
                        tg->lut_lsd.lsd_last_transno = info->fti_transno;
                        update = true;
                }
-               cfs_spin_unlock(&tg->lut_translock);
+               spin_unlock(&tg->lut_translock);
                if (update)
                        err = tgt_server_data_write(info->fti_env, tg, th);
        } else {
@@ -216,7 +216,7 @@ int ofd_txn_stop_cb(const struct lu_env *env, struct thandle *txn,
                info->fti_has_trans = 1;
        }
 
-       cfs_spin_lock(&ofd->ofd_lut.lut_translock);
+       spin_lock(&ofd->ofd_lut.lut_translock);
        if (txn->th_result != 0) {
                if (info->fti_transno != 0) {
                        CERROR("Replay transno "LPU64" failed: rc %d\n",
@@ -230,7 +230,7 @@ int ofd_txn_stop_cb(const struct lu_env *env, struct thandle *txn,
                if (info->fti_transno > ofd->ofd_lut.lut_last_transno)
                        ofd->ofd_lut.lut_last_transno = info->fti_transno;
        }
-       cfs_spin_unlock(&ofd->ofd_lut.lut_translock);
+       spin_unlock(&ofd->ofd_lut.lut_translock);
 
        /** VBR: set new versions */
        if (txn->th_result == 0 && info->fti_obj != NULL) {
index 0470836..48ca84c 100644 (file)
@@ -1054,9 +1054,9 @@ static int osc_extent_make_ready(const struct lu_env *env,
                rc = osc_make_ready(env, oap, OBD_BRW_WRITE);
                switch (rc) {
                case 0:
-                       cfs_spin_lock(&oap->oap_lock);
+                       spin_lock(&oap->oap_lock);
                        oap->oap_async_flags |= ASYNC_READY;
-                       cfs_spin_unlock(&oap->oap_lock);
+                       spin_unlock(&oap->oap_lock);
                        break;
                case -EALREADY:
                        LASSERT((oap->oap_async_flags & ASYNC_READY) != 0);
@@ -1270,12 +1270,12 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
        /* Clear opg->ops_transfer_pinned before VM lock is released. */
        opg->ops_transfer_pinned = 0;
 
-       cfs_spin_lock(&obj->oo_seatbelt);
+       spin_lock(&obj->oo_seatbelt);
        LASSERT(opg->ops_submitter != NULL);
        LASSERT(!cfs_list_empty(&opg->ops_inflight));
        cfs_list_del_init(&opg->ops_inflight);
        opg->ops_submitter = NULL;
-       cfs_spin_unlock(&obj->oo_seatbelt);
+       spin_unlock(&obj->oo_seatbelt);
 
        opg->ops_submit_time = 0;
        srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK;
@@ -1753,9 +1753,9 @@ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
        }
 
        /* As the transfer for this page is being done, clear the flags */
-       cfs_spin_lock(&oap->oap_lock);
+       spin_lock(&oap->oap_lock);
        oap->oap_async_flags = 0;
-       cfs_spin_unlock(&oap->oap_lock);
+       spin_unlock(&oap->oap_lock);
        oap->oap_interrupted = 0;
 
        if (oap->oap_cmd & OBD_BRW_WRITE && xid > 0) {
@@ -2171,7 +2171,7 @@ int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
        CFS_INIT_LIST_HEAD(&oap->oap_pending_item);
        CFS_INIT_LIST_HEAD(&oap->oap_rpc_item);
 
-       cfs_spin_lock_init(&oap->oap_lock);
+       spin_lock_init(&oap->oap_lock);
        CDEBUG(D_INFO, "oap %p page %p obj off "LPU64"\n",
               oap, page, oap->oap_obj_off);
        RETURN(0);
@@ -2425,9 +2425,9 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
        if (rc)
                GOTO(out, rc);
 
-       cfs_spin_lock(&oap->oap_lock);
+       spin_lock(&oap->oap_lock);
        oap->oap_async_flags |= ASYNC_READY|ASYNC_URGENT;
-       cfs_spin_unlock(&oap->oap_lock);
+       spin_unlock(&oap->oap_lock);
 
        if (cfs_memory_pressure_get())
                ext->oe_memalloc = 1;
index d10d26f..2623715 100644 (file)
@@ -85,7 +85,7 @@ struct osc_io {
        struct osc_async_cbargs {
                bool              opc_rpc_sent;
                int               opc_rc;
-               cfs_completion_t  opc_sync;
+               struct completion       opc_sync;
        } oi_cbarg;
 };
 
@@ -129,7 +129,7 @@ struct osc_object {
          */
         struct cl_io       oo_debug_io;
         /** Serialization object for osc_object::oo_debug_io. */
-        cfs_mutex_t        oo_debug_mutex;
+       struct mutex       oo_debug_mutex;
 #endif
         /**
          * List of pages in transfer.
@@ -139,7 +139,7 @@ struct osc_object {
          * Lock, protecting ccc_object::cob_inflight, because a seat-belt is
          * locked during take-off and landing.
          */
-        cfs_spinlock_t     oo_seatbelt;
+       spinlock_t         oo_seatbelt;
 
        /**
         * used by the osc to keep track of what objects to build into rpcs.
@@ -168,27 +168,27 @@ struct osc_object {
 
        /** Protect extent tree. Will be used to protect
         * oo_{read|write}_pages soon. */
-       cfs_spinlock_t       oo_lock;
+       spinlock_t          oo_lock;
 };
 
 static inline void osc_object_lock(struct osc_object *obj)
 {
-       cfs_spin_lock(&obj->oo_lock);
+       spin_lock(&obj->oo_lock);
 }
 
 static inline int osc_object_trylock(struct osc_object *obj)
 {
-       return cfs_spin_trylock(&obj->oo_lock);
+       return spin_trylock(&obj->oo_lock);
 }
 
 static inline void osc_object_unlock(struct osc_object *obj)
 {
-       cfs_spin_unlock(&obj->oo_lock);
+       spin_unlock(&obj->oo_lock);
 }
 
 static inline int osc_object_is_locked(struct osc_object *obj)
 {
-       return cfs_spin_is_locked(&obj->oo_lock);
+       return spin_is_locked(&obj->oo_lock);
 }
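
A general kernel API caveat for the helper above, independent of this patch: spin_is_locked() evaluates to 0 on uniprocessor builds without CONFIG_DEBUG_SPINLOCK, so it is only trustworthy inside assertions on SMP configurations. Where lockdep is available, lockdep_assert_held() is the more robust check; a sketch:

    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    static void assert_object_locked(spinlock_t *lock)
    {
            /* No-op without lockdep; with CONFIG_LOCKDEP it warns if
             * the current context does not hold 'lock'. */
            lockdep_assert_held(lock);
    }
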
 
 /*
index fc3a47f..f4629b4 100644 (file)
@@ -102,7 +102,7 @@ struct lu_kmem_descr osc_caches[] = {
         }
 };
 
-cfs_lock_class_key_t osc_ast_guard_class;
+struct lock_class_key osc_ast_guard_class;
 
 /*****************************************************************************
  *
index d3cfc8d..255bf04 100644 (file)
@@ -69,8 +69,8 @@ struct osc_async_page {
         struct client_obd       *oap_cli;
        struct osc_object       *oap_obj;
 
-        struct ldlm_lock        *oap_ldlm_lock;
-        cfs_spinlock_t           oap_lock;
+       struct ldlm_lock        *oap_ldlm_lock;
+       spinlock_t               oap_lock;
 };
 
 #define oap_page        oap_brw_page.pg
@@ -131,7 +131,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
                  cfs_list_t *ext_list, int cmd, pdl_policy_t p);
 int osc_lru_shrink(struct client_obd *cli, int target);
 
-extern cfs_spinlock_t osc_ast_guard;
+extern spinlock_t osc_ast_guard;
 
 int osc_cleanup(struct obd_device *obd);
 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg);
index f75aa76..98525eb 100644 (file)
@@ -337,7 +337,7 @@ static int osc_async_upcall(void *a, int rc)
        struct osc_async_cbargs *args = a;
 
         args->opc_rc = rc;
-        cfs_complete(&args->opc_sync);
+       complete(&args->opc_sync);
         return 0;
 }
 
@@ -471,7 +471,7 @@ static int osc_io_setattr_start(const struct lu_env *env,
 
                 oinfo.oi_oa = oa;
                 oinfo.oi_capa = io->u.ci_setattr.sa_capa;
-                cfs_init_completion(&cbargs->opc_sync);
+               init_completion(&cbargs->opc_sync);
 
                 if (ia_valid & ATTR_SIZE)
                         result = osc_punch_base(osc_export(cl2osc(obj)),
@@ -497,7 +497,7 @@ static void osc_io_setattr_end(const struct lu_env *env,
         int result = 0;
 
        if (cbargs->opc_rpc_sent) {
-               cfs_wait_for_completion(&cbargs->opc_sync);
+               wait_for_completion(&cbargs->opc_sync);
                result = io->ci_result = cbargs->opc_rc;
        }
         if (result == 0) {
@@ -593,7 +593,7 @@ static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
        memset(oinfo, 0, sizeof(*oinfo));
        oinfo->oi_oa = oa;
        oinfo->oi_capa = fio->fi_capa;
-       cfs_init_completion(&cbargs->opc_sync);
+       init_completion(&cbargs->opc_sync);
 
        rc = osc_sync_base(osc_export(obj), oinfo, osc_async_upcall, cbargs,
                           PTLRPCD_SET);
@@ -655,7 +655,7 @@ static void osc_io_fsync_end(const struct lu_env *env,
                struct osc_io           *oio    = cl2osc_io(env, slice);
                struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
 
-               cfs_wait_for_completion(&cbargs->opc_sync);
+               wait_for_completion(&cbargs->opc_sync);
                if (result == 0)
                        result = cbargs->opc_rc;
        }
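
The opc_sync conversions in this file follow the stock completion life cycle: init_completion() re-arms the completion before each submission, the callback fires complete() exactly once, and the submitter blocks in wait_for_completion(). A minimal sketch with illustrative names:

    #include <linux/completion.h>

    struct async_cbargs {
            int               ca_rc;        /* result from the callback */
            struct completion ca_sync;
    };

    static int async_upcall(void *a, int rc)
    {
            struct async_cbargs *args = a;

            args->ca_rc = rc;
            complete(&args->ca_sync);       /* wake the single waiter */
            return 0;
    }

    static int submit_and_wait(struct async_cbargs *args)
    {
            init_completion(&args->ca_sync); /* re-arm before every use */
            /* ... issue the async request with async_upcall as callback ... */
            wait_for_completion(&args->ca_sync);
            return args->ca_rc;
    }

If several waiters had to be released at once, complete_all() would be the variant to use instead.
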
index 08b3a96..3a518c4 100644 (file)
@@ -135,12 +135,12 @@ static int osc_lock_invariant(struct osc_lock *ols)
  */
 static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
 {
-        struct ldlm_lock *dlmlock;
+       struct ldlm_lock *dlmlock;
 
-        cfs_spin_lock(&osc_ast_guard);
-        dlmlock = olck->ols_lock;
-        if (dlmlock == NULL) {
-                cfs_spin_unlock(&osc_ast_guard);
+       spin_lock(&osc_ast_guard);
+       dlmlock = olck->ols_lock;
+       if (dlmlock == NULL) {
+               spin_unlock(&osc_ast_guard);
                 return;
         }
 
@@ -149,7 +149,7 @@ static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
          * call to osc_lock_detach() */
         dlmlock->l_ast_data = NULL;
         olck->ols_handle.cookie = 0ULL;
-        cfs_spin_unlock(&osc_ast_guard);
+       spin_unlock(&osc_ast_guard);
 
         lock_res_and_lock(dlmlock);
         if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
@@ -293,14 +293,14 @@ static __u64 osc_enq2ldlm_flags(__u32 enqflags)
  * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
  * pointers. Initialized in osc_init().
  */
-cfs_spinlock_t osc_ast_guard;
+spinlock_t osc_ast_guard;
 
 static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
 {
-        struct osc_lock *olck;
+       struct osc_lock *olck;
 
-        lock_res_and_lock(dlm_lock);
-        cfs_spin_lock(&osc_ast_guard);
+       lock_res_and_lock(dlm_lock);
+       spin_lock(&osc_ast_guard);
         olck = dlm_lock->l_ast_data;
         if (olck != NULL) {
                 struct cl_lock *lock = olck->ols_cl.cls_lock;
@@ -320,9 +320,9 @@ static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
                 } else
                         olck = NULL;
         }
-        cfs_spin_unlock(&osc_ast_guard);
-        unlock_res_and_lock(dlm_lock);
-        return olck;
+       spin_unlock(&osc_ast_guard);
+       unlock_res_and_lock(dlm_lock);
+       return olck;
 }
 
 static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
@@ -466,11 +466,11 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
         LASSERT(dlmlock != NULL);
 
         lock_res_and_lock(dlmlock);
-        cfs_spin_lock(&osc_ast_guard);
-        LASSERT(dlmlock->l_ast_data == olck);
-        LASSERT(olck->ols_lock == NULL);
-        olck->ols_lock = dlmlock;
-        cfs_spin_unlock(&osc_ast_guard);
+       spin_lock(&osc_ast_guard);
+       LASSERT(dlmlock->l_ast_data == olck);
+       LASSERT(olck->ols_lock == NULL);
+       olck->ols_lock = dlmlock;
+       spin_unlock(&osc_ast_guard);
 
         /*
          * Lock might not be granted yet. In this case, completion ast
@@ -530,11 +530,11 @@ static int osc_lock_upcall(void *cookie, int errcode)
                         dlmlock = ldlm_handle2lock(&olck->ols_handle);
                         if (dlmlock != NULL) {
                                 lock_res_and_lock(dlmlock);
-                                cfs_spin_lock(&osc_ast_guard);
-                                LASSERT(olck->ols_lock == NULL);
-                                dlmlock->l_ast_data = NULL;
-                                olck->ols_handle.cookie = 0ULL;
-                                cfs_spin_unlock(&osc_ast_guard);
+                               spin_lock(&osc_ast_guard);
+                               LASSERT(olck->ols_lock == NULL);
+                               dlmlock->l_ast_data = NULL;
+                               olck->ols_handle.cookie = 0ULL;
+                               spin_unlock(&osc_ast_guard);
                                 ldlm_lock_fail_match_locked(dlmlock);
                                 unlock_res_and_lock(dlmlock);
                                 LDLM_LOCK_PUT(dlmlock);
@@ -1087,7 +1087,7 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
         if (olck->ols_glimpse)
                 return 0;
 
-        cfs_spin_lock(&hdr->coh_lock_guard);
+       spin_lock(&hdr->coh_lock_guard);
         cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
                 struct cl_lock_descr *cld = &scan->cll_descr;
                 const struct osc_lock *scan_ols;
@@ -1125,7 +1125,7 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
                 conflict = scan;
                 break;
         }
-        cfs_spin_unlock(&hdr->coh_lock_guard);
+       spin_unlock(&hdr->coh_lock_guard);
 
         if (conflict) {
                 if (lock->cll_descr.cld_mode == CLM_GROUP) {
@@ -1450,7 +1450,7 @@ static int osc_lock_has_pages(struct osc_lock *olck)
         lock  = olck->ols_cl.cls_lock;
         descr = &lock->cll_descr;
 
-        cfs_mutex_lock(&oob->oo_debug_mutex);
+       mutex_lock(&oob->oo_debug_mutex);
 
         io->ci_obj = cl_object_top(obj);
        io->ci_ignore_layout = 1;
@@ -1465,7 +1465,7 @@ static int osc_lock_has_pages(struct osc_lock *olck)
                         cfs_cond_resched();
         } while (result != CLP_GANG_OKAY);
         cl_io_fini(env, io);
-        cfs_mutex_unlock(&oob->oo_debug_mutex);
+       mutex_unlock(&oob->oo_debug_mutex);
         cl_env_nested_put(&nest, env);
 
         return (result == CLP_GANG_ABORT);
@@ -1723,10 +1723,10 @@ int osc_lock_init(const struct lu_env *env,
 
 int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
 {
-        struct osc_lock *olock;
-        int              rc = 0;
+       struct osc_lock *olock;
+       int              rc = 0;
 
-        cfs_spin_lock(&osc_ast_guard);
+       spin_lock(&osc_ast_guard);
         olock = dlm->l_ast_data;
         /*
          * there's a very rare race with osc_page_addref_lock(), but that
@@ -1739,8 +1739,8 @@ int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
                 cfs_atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
                 rc = 1;
         }
-        cfs_spin_unlock(&osc_ast_guard);
-        return rc;
+       spin_unlock(&osc_ast_guard);
+       return rc;
 }
 
 /** @} osc */
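
Throughout this file osc_ast_guard nests inside the per-resource lock: every site takes lock_res_and_lock() first and spin_lock(&osc_ast_guard) second. Keeping the order identical everywhere is what rules out AB-BA deadlocks between the two locks; a generic sketch of that nesting discipline, with hypothetical names:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(global_guard);   /* plays the osc_ast_guard role */

    struct object {
            spinlock_t res_lock;
            void      *ast_data;
    };

    static void object_detach(struct object *o)
    {
            /* Fixed order at every call site: per-object lock first,
             * global guard second. */
            spin_lock(&o->res_lock);
            spin_lock(&global_guard);
            o->ast_data = NULL;
            spin_unlock(&global_guard);
            spin_unlock(&o->res_lock);
    }
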
index 5f577ea..bd2e6f4 100644 (file)
@@ -78,9 +78,9 @@ static int osc_object_init(const struct lu_env *env, struct lu_object *obj,
 
         osc->oo_oinfo = cconf->u.coc_oinfo;
 #ifdef INVARIANT_CHECK
-        cfs_mutex_init(&osc->oo_debug_mutex);
+       mutex_init(&osc->oo_debug_mutex);
 #endif
-        cfs_spin_lock_init(&osc->oo_seatbelt);
+       spin_lock_init(&osc->oo_seatbelt);
         for (i = 0; i < CRT_NR; ++i)
                 CFS_INIT_LIST_HEAD(&osc->oo_inflight[i]);
 
@@ -96,7 +96,7 @@ static int osc_object_init(const struct lu_env *env, struct lu_object *obj,
        CFS_INIT_LIST_HEAD(&osc->oo_reading_exts);
        cfs_atomic_set(&osc->oo_nr_reads, 0);
        cfs_atomic_set(&osc->oo_nr_writes, 0);
-       cfs_spin_lock_init(&osc->oo_lock);
+       spin_lock_init(&osc->oo_lock);
 
        return 0;
 }
index 126256f..1383469 100644 (file)
@@ -126,7 +126,7 @@ static int osc_page_protected(const struct lu_env *env,
                 descr->cld_mode = mode;
                 descr->cld_start = page->cp_index;
                 descr->cld_end   = page->cp_index;
-                cfs_spin_lock(&hdr->coh_lock_guard);
+               spin_lock(&hdr->coh_lock_guard);
                 cfs_list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
                         /*
                          * Lock-less sub-lock has to be either in HELD state
@@ -144,7 +144,7 @@ static int osc_page_protected(const struct lu_env *env,
                                 break;
                         }
                 }
-                cfs_spin_unlock(&hdr->coh_lock_guard);
+               spin_unlock(&hdr->coh_lock_guard);
         }
         return result;
 }
@@ -207,10 +207,10 @@ static void osc_page_transfer_add(const struct lu_env *env,
         * first and then use it as inflight. */
        osc_lru_del(osc_cli(obj), opg, false);
 
-        cfs_spin_lock(&obj->oo_seatbelt);
-        cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
-        opg->ops_submitter = cfs_current();
-        cfs_spin_unlock(&obj->oo_seatbelt);
+       spin_lock(&obj->oo_seatbelt);
+       cfs_list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
+       opg->ops_submitter = cfs_current();
+       spin_unlock(&obj->oo_seatbelt);
 }
 
 static int osc_page_cache_add(const struct lu_env *env,
@@ -432,13 +432,13 @@ static void osc_page_delete(const struct lu_env *env,
                 LASSERT(0);
         }
 
-       cfs_spin_lock(&obj->oo_seatbelt);
+       spin_lock(&obj->oo_seatbelt);
        if (opg->ops_submitter != NULL) {
                LASSERT(!cfs_list_empty(&opg->ops_inflight));
                cfs_list_del_init(&opg->ops_inflight);
                opg->ops_submitter = NULL;
        }
-       cfs_spin_unlock(&obj->oo_seatbelt);
+       spin_unlock(&obj->oo_seatbelt);
 
        osc_lru_del(osc_cli(obj), opg, true);
        EXIT;
@@ -454,9 +454,9 @@ void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
 
         opg->ops_from = from;
         opg->ops_to   = to;
-        cfs_spin_lock(&oap->oap_lock);
-        oap->oap_async_flags |= ASYNC_COUNT_STABLE;
-        cfs_spin_unlock(&oap->oap_lock);
+       spin_lock(&oap->oap_lock);
+       oap->oap_async_flags |= ASYNC_COUNT_STABLE;
+       spin_unlock(&oap->oap_lock);
 }
 
 static int osc_page_cancel(const struct lu_env *env,
@@ -844,7 +844,7 @@ static int osc_lru_reclaim(struct client_obd *cli)
 
        /* Reclaim LRU slots from other client_obd as it can't free enough
         * from its own. This should rarely happen. */
-       cfs_spin_lock(&cache->ccc_lru_lock);
+       spin_lock(&cache->ccc_lru_lock);
        cache->ccc_lru_shrinkers++;
        cfs_list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
        cfs_list_for_each_entry_safe(victim, tmp, &cache->ccc_lru, cl_lru_osc) {
@@ -860,7 +860,7 @@ static int osc_lru_reclaim(struct client_obd *cli)
                if (cfs_atomic_read(&victim->cl_lru_in_list) > 0)
                        break;
        }
-       cfs_spin_unlock(&cache->ccc_lru_lock);
+       spin_unlock(&cache->ccc_lru_lock);
        if (victim == cli) {
                CDEBUG(D_CACHE, "%s: can't get any free LRU slots.\n",
                        cli->cl_import->imp_obd->obd_name);
index 4f896c3..7b84e6a 100644 (file)
@@ -2258,17 +2258,17 @@ static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
         LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
 
         lock_res_and_lock(lock);
-        cfs_spin_lock(&osc_ast_guard);
+       spin_lock(&osc_ast_guard);
 
-        if (lock->l_ast_data == NULL)
-                lock->l_ast_data = data;
-        if (lock->l_ast_data == data)
-                set = 1;
+       if (lock->l_ast_data == NULL)
+               lock->l_ast_data = data;
+       if (lock->l_ast_data == data)
+               set = 1;
 
-        cfs_spin_unlock(&osc_ast_guard);
-        unlock_res_and_lock(lock);
+       spin_unlock(&osc_ast_guard);
+       unlock_res_and_lock(lock);
 
-        return set;
+       return set;
 }
 
 static int osc_set_data_with_check(struct lustre_handle *lockh,
@@ -2797,10 +2797,10 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
 
          /* Since the request might also come from lprocfs, we need to
           * sync this with client_disconnect_export; see bug 15684. */
-        cfs_down_read(&obd->u.cli.cl_sem);
+       down_read(&obd->u.cli.cl_sem);
         if (obd->u.cli.cl_import)
                 imp = class_import_get(obd->u.cli.cl_import);
-        cfs_up_read(&obd->u.cli.cl_sem);
+       up_read(&obd->u.cli.cl_sem);
         if (!imp)
                 RETURN(-ENODEV);
 
@@ -3136,9 +3136,9 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
 
                /* add this osc into entity list */
                LASSERT(cfs_list_empty(&cli->cl_lru_osc));
-               cfs_spin_lock(&cli->cl_cache->ccc_lru_lock);
+               spin_lock(&cli->cl_cache->ccc_lru_lock);
                cfs_list_add(&cli->cl_lru_osc, &cli->cl_cache->ccc_lru);
-               cfs_spin_unlock(&cli->cl_cache->ccc_lru_lock);
+               spin_unlock(&cli->cl_cache->ccc_lru_lock);
 
                RETURN(0);
        }
@@ -3487,9 +3487,9 @@ static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
                 CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
                 /* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
                 ptlrpc_deactivate_import(imp);
-                cfs_spin_lock(&imp->imp_lock);
-                imp->imp_pingable = 0;
-                cfs_spin_unlock(&imp->imp_lock);
+               spin_lock(&imp->imp_lock);
+               imp->imp_pingable = 0;
+               spin_unlock(&imp->imp_lock);
                 break;
         }
         case OBD_CLEANUP_EXPORTS: {
@@ -3530,9 +3530,9 @@ int osc_cleanup(struct obd_device *obd)
        /* lru cleanup */
        if (cli->cl_cache != NULL) {
                LASSERT(cfs_atomic_read(&cli->cl_cache->ccc_users) > 0);
-               cfs_spin_lock(&cli->cl_cache->ccc_lru_lock);
+               spin_lock(&cli->cl_cache->ccc_lru_lock);
                cfs_list_del_init(&cli->cl_lru_osc);
-               cfs_spin_unlock(&cli->cl_cache->ccc_lru_lock);
+               spin_unlock(&cli->cl_cache->ccc_lru_lock);
                cli->cl_lru_left = NULL;
                cfs_atomic_dec(&cli->cl_cache->ccc_users);
                cli->cl_cache = NULL;
@@ -3611,8 +3611,8 @@ struct obd_ops osc_obd_ops = {
 };
 
 extern struct lu_kmem_descr osc_caches[];
-extern cfs_spinlock_t       osc_ast_guard;
-extern cfs_lock_class_key_t osc_ast_guard_class;
+extern spinlock_t osc_ast_guard;
+extern struct lock_class_key osc_ast_guard_class;
 
 int __init osc_init(void)
 {
@@ -3636,10 +3636,10 @@ int __init osc_init(void)
                 RETURN(rc);
         }
 
-        cfs_spin_lock_init(&osc_ast_guard);
-        cfs_lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
+       spin_lock_init(&osc_ast_guard);
+       lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
 
-        RETURN(rc);
+       RETURN(rc);
 }
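
The lockdep_set_class() call above re-keys the freshly initialized lock with a statically allocated lock_class_key, so lockdep tracks this global lock as its own class rather than lumping it together with other locks set up by the same init code. A minimal sketch of the pairing:

    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    static spinlock_t            guard;
    static struct lock_class_key guard_class;  /* one key per lock class */

    static void guard_setup(void)
    {
            spin_lock_init(&guard);
            /* Must happen before the lock is first taken. */
            lockdep_set_class(&guard, &guard_class);
    }
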
 
 #ifdef __KERNEL__
index b94689c..249fa0e 100644 (file)
@@ -59,7 +59,7 @@
 
 struct osd_compat_objid_seq {
         /* protects on-the-fly initialization */
-        cfs_semaphore_t        dir_init_sem;
+       struct semaphore        dir_init_sem;
         /* file storing last created objid */
         struct osd_inode_id    last_id;
         struct dentry         *groot; /* O/<seq> */
@@ -130,7 +130,7 @@ int osd_compat_seq_init(struct osd_device *osd, int seq)
         if (grp->groot != NULL)
                 RETURN(0);
 
-        cfs_down(&grp->dir_init_sem);
+       down(&grp->dir_init_sem);
 
         sprintf(name, "%d", seq);
         d = simple_mkdir(map->root, osd->od_mnt, name, 0755, 1);
@@ -169,7 +169,7 @@ int osd_compat_seq_init(struct osd_device *osd, int seq)
         if (rc)
                 osd_compat_seq_fini(osd, seq);
 out:
-        cfs_up(&grp->dir_init_sem);
+       up(&grp->dir_init_sem);
         RETURN(rc);
 }
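
dir_init_sem above is a binary semaphore used as a sleeping lock around one-time directory setup: sema_init(&sem, 1) at creation, then down()/up() bracketing the slow path. A self-contained sketch of the same shape (names are illustrative; down() sleeps, so this is for process context only):

    #include <linux/semaphore.h>

    static struct semaphore dir_init_sem;

    static void seq_table_setup(void)
    {
            sema_init(&dir_init_sem, 1);    /* count 1: binary semaphore */
    }

    static int seq_init_once(void)
    {
            int rc = 0;

            down(&dir_init_sem);            /* may sleep */
            /* ... perform the one-time initialization ... */
            up(&dir_init_sem);
            return rc;
    }

A struct mutex would serve equally well here; the semaphore form simply predates it and also permits up() from a task other than the one that called down().
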
 
@@ -279,7 +279,7 @@ int osd_compat_init(struct osd_device *dev)
 
         /* Initialize all groups */
         for (i = 0; i < MAX_OBJID_GROUP; i++) {
-                cfs_sema_init(&dev->od_ost_map->groups[i].dir_init_sem, 1);
+               sema_init(&dev->od_ost_map->groups[i].dir_init_sem, 1);
                 rc = osd_compat_seq_init(dev, i);
                 if (rc) {
                         osd_compat_fini(dev);
index 76a346c..dc8c919 100644 (file)
@@ -152,9 +152,9 @@ static struct lu_object *osd_object_alloc(const struct lu_env *env,
                 dt_object_init(&mo->oo_dt, NULL, d);
                mo->oo_dt.do_ops = &osd_obj_ea_ops;
                 l->lo_ops = &osd_lu_obj_ops;
-                cfs_init_rwsem(&mo->oo_sem);
-                cfs_init_rwsem(&mo->oo_ext_idx_sem);
-                cfs_spin_lock_init(&mo->oo_guard);
+               init_rwsem(&mo->oo_sem);
+               init_rwsem(&mo->oo_ext_idx_sem);
+               spin_lock_init(&mo->oo_guard);
                 return l;
         } else {
                 return NULL;
@@ -925,7 +925,7 @@ int osd_statfs(const struct lu_env *env, struct dt_device *d,
                 ksfs = &osd_oti_get(env)->oti_ksfs;
         }
 
-       cfs_spin_lock(&osd->od_osfs_lock);
+       spin_lock(&osd->od_osfs_lock);
        /* cache 1 second */
        if (cfs_time_before_64(osd->od_osfs_age, cfs_time_shift_64(-1))) {
                result = sb->s_op->statfs(sb->s_root, ksfs);
@@ -937,9 +937,9 @@ int osd_statfs(const struct lu_env *env, struct dt_device *d,
                }
        }
 
-        if (likely(result == 0))
-                *sfs = osd->od_statfs;
-        cfs_spin_unlock(&osd->od_osfs_lock);
+       if (likely(result == 0))
+               *sfs = osd->od_statfs;
+       spin_unlock(&osd->od_osfs_lock);
 
         if (unlikely(env == NULL))
                 OBD_FREE_PTR(ksfs);
@@ -1155,7 +1155,7 @@ static void osd_object_read_lock(const struct lu_env *env,
         LINVRNT(osd_invariant(obj));
 
         LASSERT(obj->oo_owner != env);
-        cfs_down_read_nested(&obj->oo_sem, role);
+       down_read_nested(&obj->oo_sem, role);
 
         LASSERT(obj->oo_owner == NULL);
         oti->oti_r_locks++;
@@ -1170,7 +1170,7 @@ static void osd_object_write_lock(const struct lu_env *env,
         LINVRNT(osd_invariant(obj));
 
         LASSERT(obj->oo_owner != env);
-        cfs_down_write_nested(&obj->oo_sem, role);
+       down_write_nested(&obj->oo_sem, role);
 
         LASSERT(obj->oo_owner == NULL);
         obj->oo_owner = env;
@@ -1187,7 +1187,7 @@ static void osd_object_read_unlock(const struct lu_env *env,
 
         LASSERT(oti->oti_r_locks > 0);
         oti->oti_r_locks--;
-        cfs_up_read(&obj->oo_sem);
+       up_read(&obj->oo_sem);
 }
 
 static void osd_object_write_unlock(const struct lu_env *env,
@@ -1202,7 +1202,7 @@ static void osd_object_write_unlock(const struct lu_env *env,
         LASSERT(oti->oti_w_locks > 0);
         oti->oti_w_locks--;
         obj->oo_owner = NULL;
-        cfs_up_write(&obj->oo_sem);
+       up_write(&obj->oo_sem);
 }
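
The _nested suffix on down_read_nested()/down_write_nested() passes a lockdep subclass (the 'role' argument above), which lets lockdep validate a fixed ordering when two semaphores of the same lock class are held at once instead of flagging false recursion. A generic sketch of the annotation, with hypothetical subclass names:

    #include <linux/rwsem.h>

    enum { ROLE_PARENT = 0, ROLE_CHILD = 1 };   /* lockdep subclasses */

    static void lock_parent_child(struct rw_semaphore *parent,
                                  struct rw_semaphore *child)
    {
            /* Same class held twice: annotate the fixed order. */
            down_write_nested(parent, ROLE_PARENT);
            down_write_nested(child, ROLE_CHILD);
    }

    static void unlock_parent_child(struct rw_semaphore *parent,
                                    struct rw_semaphore *child)
    {
            up_write(child);
            up_write(parent);
    }

Without CONFIG_DEBUG_LOCK_ALLOC the _nested variants compile down to the plain down_read()/down_write() calls.
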
 
 static int osd_object_write_locked(const struct lu_env *env,
@@ -1241,14 +1241,14 @@ static int capa_is_sane(const struct lu_env *env,
                 RETURN(-ESTALE);
         }
 
-        cfs_spin_lock(&capa_lock);
-        for (i = 0; i < 2; i++) {
-                if (keys[i].lk_keyid == capa->lc_keyid) {
-                        oti->oti_capa_key = keys[i];
-                        break;
-                }
-        }
-        cfs_spin_unlock(&capa_lock);
+       spin_lock(&capa_lock);
+       for (i = 0; i < 2; i++) {
+               if (keys[i].lk_keyid == capa->lc_keyid) {
+                       oti->oti_capa_key = keys[i];
+                       break;
+               }
+       }
+       spin_unlock(&capa_lock);
 
         if (i == 2) {
                 DEBUG_CAPA(D_ERROR, capa, "no matched capa key");
@@ -1363,10 +1363,10 @@ static int osd_attr_get(const struct lu_env *env,
         if (osd_object_auth(env, dt, capa, CAPA_OPC_META_READ))
                 return -EACCES;
 
-        cfs_spin_lock(&obj->oo_guard);
-        osd_inode_getattr(env, obj->oo_inode, attr);
-        cfs_spin_unlock(&obj->oo_guard);
-        return 0;
+       spin_lock(&obj->oo_guard);
+       osd_inode_getattr(env, obj->oo_inode, attr);
+       spin_unlock(&obj->oo_guard);
+       return 0;
 }
 
 static int osd_declare_attr_set(const struct lu_env *env,
@@ -1607,9 +1607,9 @@ static int osd_attr_set(const struct lu_env *env,
        if (rc)
                return rc;
 
-        cfs_spin_lock(&obj->oo_guard);
-        rc = osd_inode_setattr(env, inode, attr);
-        cfs_spin_unlock(&obj->oo_guard);
+       spin_lock(&obj->oo_guard);
+       rc = osd_inode_setattr(env, inode, attr);
+       spin_unlock(&obj->oo_guard);
 
         if (!rc)
                 inode->i_sb->s_op->dirty_inode(inode);
@@ -2048,13 +2048,13 @@ static int osd_object_destroy(const struct lu_env *env,
 
        /* Parallel control for OI scrub. In most cases there is no lock
         * contention, so it will not affect unlink performance. */
-       cfs_mutex_lock(&inode->i_mutex);
+       mutex_lock(&inode->i_mutex);
        if (S_ISDIR(inode->i_mode)) {
                LASSERT(osd_inode_unlinked(inode) ||
                        inode->i_nlink == 1);
-               cfs_spin_lock(&obj->oo_guard);
+               spin_lock(&obj->oo_guard);
                clear_nlink(inode);
-               cfs_spin_unlock(&obj->oo_guard);
+               spin_unlock(&obj->oo_guard);
                inode->i_sb->s_op->dirty_inode(inode);
        } else {
                LASSERT(osd_inode_unlinked(inode));
@@ -2063,7 +2063,7 @@ static int osd_object_destroy(const struct lu_env *env,
         OSD_EXEC_OP(th, destroy);
 
         result = osd_oi_delete(osd_oti_get(env), osd, fid, th);
-       cfs_mutex_unlock(&inode->i_mutex);
+       mutex_unlock(&inode->i_mutex);
 
         /* XXX: add to ext3 orphan list */
         /* rc = ext3_orphan_add(handle_t *handle, struct inode *inode) */
@@ -2259,7 +2259,7 @@ static int osd_object_ref_add(const struct lu_env *env,
         * at some point. Both e2fsprogs and any Lustre-supported ldiskfs
         * do not actually care whether this flag is set or not.
         */
-       cfs_spin_lock(&obj->oo_guard);
+       spin_lock(&obj->oo_guard);
        /* inc_nlink from 0 may cause WARN_ON */
        if(inode->i_nlink == 0)
                set_nlink(inode, 1);
@@ -2271,7 +2271,7 @@ static int osd_object_ref_add(const struct lu_env *env,
                        set_nlink(inode, 1);
        }
        LASSERT(inode->i_nlink <= LDISKFS_LINK_MAX);
-       cfs_spin_unlock(&obj->oo_guard);
+       spin_unlock(&obj->oo_guard);
        inode->i_sb->s_op->dirty_inode(inode);
        LINVRNT(osd_invariant(obj));
 
@@ -2311,7 +2311,7 @@ static int osd_object_ref_del(const struct lu_env *env, struct dt_object *dt,
 
         OSD_EXEC_OP(th, ref_del);
 
-       cfs_spin_lock(&obj->oo_guard);
+       spin_lock(&obj->oo_guard);
        LASSERT(inode->i_nlink > 0);
        drop_nlink(inode);
        /* If this is/was a many-subdir directory (nlink > LDISKFS_LINK_MAX)
@@ -2319,7 +2319,7 @@ static int osd_object_ref_del(const struct lu_env *env, struct dt_object *dt,
         * inode will be deleted incorrectly. */
        if (S_ISDIR(inode->i_mode) && inode->i_nlink == 0)
                set_nlink(inode, 1);
-       cfs_spin_unlock(&obj->oo_guard);
+       spin_unlock(&obj->oo_guard);
        inode->i_sb->s_op->dirty_inode(inode);
        LINVRNT(osd_invariant(obj));
 
@@ -2572,9 +2572,9 @@ static struct obd_capa *osd_capa_get(const struct lu_env *env,
                 RETURN(oc);
         }
 
-        cfs_spin_lock(&capa_lock);
-        *key = dev->od_capa_keys[1];
-        cfs_spin_unlock(&capa_lock);
+       spin_lock(&capa_lock);
+       *key = dev->od_capa_keys[1];
+       spin_unlock(&capa_lock);
 
         capa->lc_keyid = key->lk_keyid;
         capa->lc_expiry = cfs_time_current_sec() + dev->od_capa_timeout;
@@ -2711,28 +2711,28 @@ static int osd_index_try(const struct lu_env *env, struct dt_object *dt,
                 OBD_ALLOC_PTR(dir);
                 if (dir != NULL) {
 
-                        cfs_spin_lock(&obj->oo_guard);
-                        if (obj->oo_dir == NULL)
-                                obj->oo_dir = dir;
-                        else
-                                /*
-                                 * Concurrent thread allocated container data.
-                                 */
-                                OBD_FREE_PTR(dir);
-                        cfs_spin_unlock(&obj->oo_guard);
-                        /*
-                         * Now, that we have container data, serialize its
-                         * initialization.
-                         */
-                        cfs_down_write(&obj->oo_ext_idx_sem);
-                        /*
-                         * recheck under lock.
-                         */
-                        if (!osd_has_index(obj))
-                                result = osd_iam_container_init(env, obj, dir);
-                        else
-                                result = 0;
-                        cfs_up_write(&obj->oo_ext_idx_sem);
+                       spin_lock(&obj->oo_guard);
+                       if (obj->oo_dir == NULL)
+                               obj->oo_dir = dir;
+                       else
+                               /*
+                                * Concurrent thread allocated container data.
+                                */
+                               OBD_FREE_PTR(dir);
+                       spin_unlock(&obj->oo_guard);
+                       /*
+                        * Now that we have container data, serialize its
+                        * initialization.
+                        */
+                       down_write(&obj->oo_ext_idx_sem);
+                       /*
+                        * recheck under lock.
+                        */
+                       if (!osd_has_index(obj))
+                               result = osd_iam_container_init(env, obj, dir);
+                       else
+                               result = 0;
+                       up_write(&obj->oo_ext_idx_sem);
                 } else {
                         result = -ENOMEM;
                 }
@@ -2988,7 +2988,7 @@ static int osd_index_ea_delete(const struct lu_env *env, struct dt_object *dt,
                 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
                                    dir, LDISKFS_HLOCK_DEL);
         } else {
-                cfs_down_write(&obj->oo_ext_idx_sem);
+               down_write(&obj->oo_ext_idx_sem);
         }
 
         bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
@@ -3002,7 +3002,7 @@ static int osd_index_ea_delete(const struct lu_env *env, struct dt_object *dt,
         if (hlock != NULL)
                 ldiskfs_htree_unlock(hlock);
         else
-                cfs_up_write(&obj->oo_ext_idx_sem);
+               up_write(&obj->oo_ext_idx_sem);
 
         LASSERT(osd_invariant(obj));
         RETURN(rc);
@@ -3291,7 +3291,7 @@ static int osd_ea_add_rec(const struct lu_env *env, struct osd_object *pobj,
                         ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
                                            pobj->oo_inode, 0);
                 } else {
-                        cfs_down_write(&pobj->oo_ext_idx_sem);
+                       down_write(&pobj->oo_ext_idx_sem);
                 }
                 rc = osd_add_dot_dotdot(info, pobj, cinode, name,
                      (struct dt_rec *)lu_object_fid(&pobj->oo_dt.do_lu),
@@ -3301,7 +3301,7 @@ static int osd_ea_add_rec(const struct lu_env *env, struct osd_object *pobj,
                         ldiskfs_htree_lock(hlock, pobj->oo_hl_head,
                                            pobj->oo_inode, LDISKFS_HLOCK_ADD);
                 } else {
-                        cfs_down_write(&pobj->oo_ext_idx_sem);
+                       down_write(&pobj->oo_ext_idx_sem);
                 }
 
                 rc = __osd_ea_add_rec(info, pobj, cinode, name, fid,
@@ -3310,7 +3310,7 @@ static int osd_ea_add_rec(const struct lu_env *env, struct osd_object *pobj,
         if (hlock != NULL)
                 ldiskfs_htree_unlock(hlock);
         else
-                cfs_up_write(&pobj->oo_ext_idx_sem);
+               up_write(&pobj->oo_ext_idx_sem);
 
         return rc;
 }
@@ -3394,7 +3394,7 @@ static int osd_ea_lookup_rec(const struct lu_env *env, struct osd_object *obj,
                 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
                                    dir, LDISKFS_HLOCK_LOOKUP);
         } else {
-                cfs_down_read(&obj->oo_ext_idx_sem);
+               down_read(&obj->oo_ext_idx_sem);
         }
 
         bh = osd_ldiskfs_find_entry(dir, dentry, &de, hlock);
@@ -3435,7 +3435,7 @@ out:
        if (hlock != NULL)
                ldiskfs_htree_unlock(hlock);
        else
-               cfs_up_read(&obj->oo_ext_idx_sem);
+               up_read(&obj->oo_ext_idx_sem);
        return rc;
 }
 
@@ -4030,7 +4030,7 @@ static int osd_ldiskfs_it_fill(const struct lu_env *env,
                 ldiskfs_htree_lock(hlock, obj->oo_hl_head,
                                    inode, LDISKFS_HLOCK_READDIR);
         } else {
-                cfs_down_read(&obj->oo_ext_idx_sem);
+               down_read(&obj->oo_ext_idx_sem);
         }
 
         result = inode->i_fop->readdir(&it->oie_file, it,
@@ -4039,7 +4039,7 @@ static int osd_ldiskfs_it_fill(const struct lu_env *env,
         if (hlock != NULL)
                 ldiskfs_htree_unlock(hlock);
         else
-                cfs_up_read(&obj->oo_ext_idx_sem);
+               up_read(&obj->oo_ext_idx_sem);
 
         if (it->oie_rd_dirent == 0) {
                 result = -EIO;
@@ -4485,8 +4485,8 @@ static int osd_device_init0(const struct lu_env *env,
        l->ld_ops = &osd_lu_ops;
        o->od_dt_dev.dd_ops = &osd_dt_ops;
 
-       cfs_spin_lock_init(&o->od_osfs_lock);
-       cfs_mutex_init(&o->od_otable_mutex);
+       spin_lock_init(&o->od_osfs_lock);
+       mutex_init(&o->od_otable_mutex);
        o->od_osfs_age = cfs_time_shift_64(-1000);
 
        o->od_capa_hash = init_capa_hash();
@@ -4662,9 +4662,9 @@ static int osd_obd_connect(const struct lu_env *env, struct obd_export **exp,
 
        *exp = class_conn2export(&conn);
 
-       cfs_spin_lock(&osd->od_osfs_lock);
+       spin_lock(&osd->od_osfs_lock);
        osd->od_connects++;
-       cfs_spin_unlock(&osd->od_osfs_lock);
+       spin_unlock(&osd->od_osfs_lock);
 
        RETURN(0);
 }
@@ -4681,11 +4681,11 @@ static int osd_obd_disconnect(struct obd_export *exp)
        ENTRY;
 
        /* Only disconnect the underlying layers on the final disconnect. */
-       cfs_spin_lock(&osd->od_osfs_lock);
+       spin_lock(&osd->od_osfs_lock);
        osd->od_connects--;
        if (osd->od_connects == 0)
                release = 1;
-       cfs_spin_unlock(&osd->od_osfs_lock);
+       spin_unlock(&osd->od_osfs_lock);
 
        rc = class_disconnect(exp); /* bz 9811 */
 
index d29a564..4db8b1d 100644 (file)
@@ -237,13 +237,13 @@ static int iam_format_guess(struct iam_container *c)
                idle_blocks = (__u32 *)(c->ic_root_bh->b_data +
                                        c->ic_descr->id_root_gap +
                                        sizeof(struct dx_countlimit));
-               cfs_down(&c->ic_idle_sem);
+               down(&c->ic_idle_sem);
                bh = iam_load_idle_blocks(c, le32_to_cpu(*idle_blocks));
                if (bh != NULL && IS_ERR(bh))
                        result = PTR_ERR(bh);
                else
                        c->ic_idle_bh = bh;
-               cfs_up(&c->ic_idle_sem);
+               up(&c->ic_idle_sem);
        }
 
        return result;
@@ -258,9 +258,9 @@ int iam_container_init(struct iam_container *c,
        memset(c, 0, sizeof *c);
        c->ic_descr  = descr;
        c->ic_object = inode;
-       cfs_init_rwsem(&c->ic_sem);
+       init_rwsem(&c->ic_sem);
        dynlock_init(&c->ic_tree_lock);
-       cfs_sema_init(&c->ic_idle_sem, 1);
+       sema_init(&c->ic_idle_sem, 1);
        return 0;
 }
 EXPORT_SYMBOL(iam_container_init);
@@ -692,22 +692,22 @@ static int iam_it_get_exact(struct iam_iterator *it, const struct iam_key *k)
 
 void iam_container_write_lock(struct iam_container *ic)
 {
-        cfs_down_write(&ic->ic_sem);
+       down_write(&ic->ic_sem);
 }
 
 void iam_container_write_unlock(struct iam_container *ic)
 {
-        cfs_up_write(&ic->ic_sem);
+       up_write(&ic->ic_sem);
 }
 
 void iam_container_read_lock(struct iam_container *ic)
 {
-        cfs_down_read(&ic->ic_sem);
+       down_read(&ic->ic_sem);
 }
 
 void iam_container_read_unlock(struct iam_container *ic)
 {
-        cfs_up_read(&ic->ic_sem);
+       up_read(&ic->ic_sem);
 }
 
 /*
@@ -1671,9 +1671,9 @@ iam_new_node(handle_t *h, struct iam_container *c, iam_ptr_t *b, int *e)
        if (c->ic_idle_bh == NULL)
                goto newblock;
 
-       cfs_down(&c->ic_idle_sem);
+       down(&c->ic_idle_sem);
        if (unlikely(c->ic_idle_bh == NULL)) {
-               cfs_up(&c->ic_idle_sem);
+               up(&c->ic_idle_sem);
                goto newblock;
        }
 
@@ -1691,7 +1691,7 @@ iam_new_node(handle_t *h, struct iam_container *c, iam_ptr_t *b, int *e)
                if (*e != 0)
                        goto fail;
 
-               cfs_up(&c->ic_idle_sem);
+               up(&c->ic_idle_sem);
                bh = ldiskfs_bread(NULL, inode, *b, 0, e);
                if (bh == NULL)
                        return NULL;
@@ -1729,7 +1729,7 @@ iam_new_node(handle_t *h, struct iam_container *c, iam_ptr_t *b, int *e)
        }
 
        c->ic_idle_bh = idle;
-       cfs_up(&c->ic_idle_sem);
+       up(&c->ic_idle_sem);
 
 got:
        /* get write access for the found buffer head */
@@ -1750,7 +1750,7 @@ newblock:
        return bh;
 
 fail:
-       cfs_up(&c->ic_idle_sem);
+       up(&c->ic_idle_sem);
        ldiskfs_std_error(inode->i_sb, *e);
        return NULL;
 }
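
The idle-block path above uses the common check, lock, recheck shape: an unlocked NULL test as the fast path, then a second test of ic_idle_bh once ic_idle_sem is held, because another thread may have consumed the cached buffer in between. A stripped-down sketch of that shape:

    #include <linux/semaphore.h>

    struct idle_cache {
            struct semaphore ic_sem;    /* protects ic_cached */
            void            *ic_cached;
    };

    static void *idle_cache_take(struct idle_cache *c)
    {
            void *p;

            if (c->ic_cached == NULL)   /* unlocked fast path */
                    return NULL;

            down(&c->ic_sem);
            p = c->ic_cached;           /* recheck under the lock */
            c->ic_cached = NULL;
            up(&c->ic_sem);
            return p;
    }
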
@@ -2388,7 +2388,7 @@ static void iam_recycle_leaf(handle_t *h, struct iam_path *p,
        int count;
        int rc;
 
-       cfs_down(&c->ic_idle_sem);
+       down(&c->ic_idle_sem);
        if (unlikely(c->ic_idle_failed)) {
                rc = -EFAULT;
                goto unlock;
@@ -2421,7 +2421,7 @@ static void iam_recycle_leaf(handle_t *h, struct iam_path *p,
        rc = iam_txn_dirty(h, p, c->ic_idle_bh);
 
 unlock:
-       cfs_up(&c->ic_idle_sem);
+       up(&c->ic_idle_sem);
        if (rc != 0)
                CWARN("%.16s: idle blocks failed, will lose the blk %u\n",
                      LDISKFS_SB(inode->i_sb)->s_es->s_volume_name, blk);
index f55a11b..fdb80e1 100644 (file)
@@ -482,12 +482,12 @@ struct iam_container {
         /*
          * read-write lock protecting index consistency.
          */
-        cfs_rw_semaphore_t   ic_sem;
+       struct rw_semaphore     ic_sem;
        struct dynlock       ic_tree_lock;
        /*
         * Protect ic_idle_bh
         */
-       cfs_semaphore_t      ic_idle_sem;
+       struct semaphore        ic_idle_sem;
        /*
         * BH for idle blocks
         */
@@ -1042,9 +1042,9 @@ static inline void iam_lock_bh(struct buffer_head volatile *bh)
 {
         DX_DEVAL(iam_lock_stats.dls_bh_lock++);
 #ifdef CONFIG_SMP
-        while (cfs_test_and_set_bit(BH_DXLock, &bh->b_state)) {
-                DX_DEVAL(iam_lock_stats.dls_bh_busy++);
-                while (cfs_test_bit(BH_DXLock, &bh->b_state))
+       while (test_and_set_bit(BH_DXLock, &bh->b_state)) {
+               DX_DEVAL(iam_lock_stats.dls_bh_busy++);
+               while (test_bit(BH_DXLock, &bh->b_state))
                         cpu_relax();
         }
 #endif
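
The loop above is an open-coded bit spinlock: spin on test_and_set_bit() until the bit is won, backing off with test_bit()/cpu_relax() to avoid hammering the cache line. The kernel ships the same idiom as ready-made helpers; a sketch using a hypothetical bit number:

    #include <linux/bit_spinlock.h>

    #define STATE_LOCK_BIT 23   /* illustrative bit within a state word */

    static void state_lock(unsigned long *state)
    {
            bit_spin_lock(STATE_LOCK_BIT, state);   /* spins until won */
    }

    static void state_unlock(unsigned long *state)
    {
            bit_spin_unlock(STATE_LOCK_BIT, state); /* clear + barrier */
    }
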
index 1bdd60e..527fa5c 100644 (file)
@@ -121,11 +121,11 @@ struct osd_object {
          * to protect index ops.
          */
         struct htree_lock_head *oo_hl_head;
-        cfs_rw_semaphore_t      oo_ext_idx_sem;
-        cfs_rw_semaphore_t      oo_sem;
-        struct osd_directory   *oo_dir;
-        /** protects inode attributes. */
-        cfs_spinlock_t          oo_guard;
+       struct rw_semaphore     oo_ext_idx_sem;
+       struct rw_semaphore     oo_sem;
+       struct osd_directory    *oo_dir;
+       /** protects inode attributes. */
+       spinlock_t              oo_guard;
         /**
          * Following two members are used to indicate the presence of dot and
          * dotdot in the given directory. This is required for interop mode
@@ -269,11 +269,11 @@ struct osd_device {
          */
         cfs_time_t                od_osfs_age;
         struct obd_statfs         od_statfs;
-        cfs_spinlock_t            od_osfs_lock;
+       spinlock_t                od_osfs_lock;
 
        unsigned int              od_noscrub:1;
 
-        struct fsfilt_operations *od_fsops;
+       struct fsfilt_operations *od_fsops;
        int                       od_connects;
        struct lu_site            od_site;
 
@@ -290,7 +290,7 @@ struct osd_device {
         cfs_atomic_t              od_r_in_flight;
         cfs_atomic_t              od_w_in_flight;
 
-       cfs_mutex_t               od_otable_mutex;
+       struct mutex              od_otable_mutex;
        struct osd_otable_it     *od_otable_it;
        struct osd_scrub          od_scrub;
 
index 77ece77..1a1d06c 100644 (file)
@@ -171,7 +171,7 @@ static int dio_complete_routine(struct bio *bio, unsigned int done, int error)
         }
 
         /* the check is outside the loop for performance reasons -bzzz */
-        if (!cfs_test_bit(BIO_RW, &bio->bi_rw)) {
+       if (!test_bit(BIO_RW, &bio->bi_rw)) {
                 bio_for_each_segment(bvl, bio, i) {
                         if (likely(error == 0))
                                 SetPageUptodate(bvl->bv_page);
@@ -922,10 +922,10 @@ int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
         int err;
 
         /* prevent reading after eof */
-        cfs_spin_lock(&inode->i_lock);
-        if (i_size_read(inode) < *offs + size) {
+       spin_lock(&inode->i_lock);
+       if (i_size_read(inode) < *offs + size) {
                loff_t diff = i_size_read(inode) - *offs;
-               cfs_spin_unlock(&inode->i_lock);
+               spin_unlock(&inode->i_lock);
                if (diff < 0) {
                        CDEBUG(D_EXT2, "size %llu is too short to read @%llu\n",
                               i_size_read(inode), *offs);
@@ -935,9 +935,9 @@ int osd_ldiskfs_read(struct inode *inode, void *buf, int size, loff_t *offs)
                } else {
                        size = diff;
                }
-        } else {
-                cfs_spin_unlock(&inode->i_lock);
-        }
+       } else {
+               spin_unlock(&inode->i_lock);
+       }
 
         blocksize = 1 << inode->i_blkbits;
         osize = size;
@@ -1103,14 +1103,14 @@ int osd_ldiskfs_write_record(struct inode *inode, void *buf, int bufsize,
                --new_size;
         /* correct in-core and on-disk sizes */
         if (new_size > i_size_read(inode)) {
-                cfs_spin_lock(&inode->i_lock);
-                if (new_size > i_size_read(inode))
-                        i_size_write(inode, new_size);
-                if (i_size_read(inode) > LDISKFS_I(inode)->i_disksize) {
-                        LDISKFS_I(inode)->i_disksize = i_size_read(inode);
-                        dirty_inode = 1;
-                }
-                cfs_spin_unlock(&inode->i_lock);
+               spin_lock(&inode->i_lock);
+               if (new_size > i_size_read(inode))
+                       i_size_write(inode, new_size);
+               if (i_size_read(inode) > LDISKFS_I(inode)->i_disksize) {
+                       LDISKFS_I(inode)->i_disksize = i_size_read(inode);
+                       dirty_inode = 1;
+               }
+               spin_unlock(&inode->i_lock);
                 if (dirty_inode)
                         inode->i_sb->s_op->dirty_inode(inode);
         }
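
Both hunks above show the standard discipline for the in-core file size: i_size_write() is not atomic with respect to i_size_read() on 32-bit SMP kernels, so writers must be serialized, here with inode->i_lock, and the comparison is repeated under the lock. A minimal sketch:

    #include <linux/fs.h>

    static void extend_in_core_size(struct inode *inode, loff_t new_size)
    {
            spin_lock(&inode->i_lock);
            if (new_size > i_size_read(inode))  /* recheck under lock */
                    i_size_write(inode, new_size);
            spin_unlock(&inode->i_lock);
    }
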
index 9e99fb3..90494b6 100644 (file)
@@ -195,7 +195,7 @@ static int osd_stats_init(struct osd_device *osd)
         ENTRY;
 
         for (i = 0; i < BRW_LAST; i++)
-                cfs_spin_lock_init(&osd->od_brw_stats.hist[i].oh_lock);
+               spin_lock_init(&osd->od_brw_stats.hist[i].oh_lock);
 
         osd->od_stats = lprocfs_alloc_stats(LPROC_OSD_LAST, 0);
         if (osd->od_stats != NULL) {
index 29e7b92..7403d77 100644 (file)
@@ -78,7 +78,7 @@ CFS_MODULE_PARM(osd_oi_count, "i", int, 0444,
                 "it's only valid for new filesystem.");
 
 /** to serialize concurrent OI index initialization */
-static cfs_mutex_t oi_init_lock;
+static struct mutex oi_init_lock;
 
 static struct dt_index_features oi_feat = {
         .dif_flags       = DT_IND_UPDATE,
@@ -350,7 +350,7 @@ int osd_oi_init(struct osd_thread_info *info, struct osd_device *osd)
        if (oi == NULL)
                RETURN(-ENOMEM);
 
-       cfs_mutex_lock(&oi_init_lock);
+       mutex_lock(&oi_init_lock);
        /* try to open existing multiple OIs first */
        rc = osd_oi_table_open(info, osd, oi, sf->sf_oi_count, false);
        if (rc < 0)
@@ -413,7 +413,7 @@ out:
                rc = 0;
        }
 
-       cfs_mutex_unlock(&oi_init_lock);
+       mutex_unlock(&oi_init_lock);
        return rc;
 }
 
@@ -639,6 +639,6 @@ int osd_oi_mod_init(void)
                 osd_oi_count = size_roundup_power2(osd_oi_count);
         }
 
-        cfs_mutex_init(&oi_init_lock);
+       mutex_init(&oi_init_lock);
         return 0;
 }
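
oi_init_lock is a plain mutex initialized once at module-init time and then used to serialize concurrent OI table setup. The statically initialized equivalent is more compact when the lock lives in a single file; a sketch with an illustrative name:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(init_lock);     /* no runtime mutex_init() needed */

    static int table_init_serialized(void)
    {
            int rc = 0;

            mutex_lock(&init_lock);     /* may sleep; process context only */
            /* ... open or create the shared tables exactly once ... */
            mutex_unlock(&init_lock);
            return rc;
    }
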
index e6eca1e..613ccc6 100644 (file)
@@ -238,7 +238,7 @@ static int osd_scrub_prep(struct osd_device *dev)
        int                   rc;
        ENTRY;
 
-       cfs_down_write(&scrub->os_rwsem);
+       down_write(&scrub->os_rwsem);
        if (flags & SS_SET_FAILOUT)
                sf->sf_param |= SP_FAILOUT;
 
@@ -273,12 +273,12 @@ static int osd_scrub_prep(struct osd_device *dev)
        sf->sf_time_last_checkpoint = sf->sf_time_latest_start;
        rc = osd_scrub_file_store(scrub);
        if (rc == 0) {
-               cfs_spin_lock(&scrub->os_lock);
+               spin_lock(&scrub->os_lock);
                thread_set_flags(thread, SVC_RUNNING);
-               cfs_spin_unlock(&scrub->os_lock);
+               spin_unlock(&scrub->os_lock);
                cfs_waitq_broadcast(&thread->t_ctl_waitq);
        }
-       cfs_up_write(&scrub->os_rwsem);
+       up_write(&scrub->os_rwsem);
 
        RETURN(rc);
 }
@@ -289,13 +289,13 @@ osd_scrub_error(struct osd_device *dev, struct osd_inode_id *lid, int rc)
        struct osd_scrub  *scrub = &dev->od_scrub;
        struct scrub_file *sf    = &scrub->os_file;
 
-       cfs_down_write(&scrub->os_rwsem);
+       down_write(&scrub->os_rwsem);
        scrub->os_new_checked++;
        sf->sf_items_failed++;
        if (sf->sf_pos_first_inconsistent == 0 ||
            sf->sf_pos_first_inconsistent > lid->oii_ino)
                sf->sf_pos_first_inconsistent = lid->oii_ino;
-       cfs_up_write(&scrub->os_rwsem);
+       up_write(&scrub->os_rwsem);
        return sf->sf_param & SP_FAILOUT ? rc : 0;
 }
 
@@ -324,7 +324,7 @@ osd_scrub_check_update(struct osd_thread_info *info, struct osd_device *dev,
                oii = cfs_list_entry(oic, struct osd_inconsistent_item,
                                     oii_cache);
 
-       cfs_down_write(&scrub->os_rwsem);
+       down_write(&scrub->os_rwsem);
        scrub->os_new_checked++;
        if (lid->oii_ino < sf->sf_pos_latest_start && oii == NULL)
                GOTO(out, rc = 0);
@@ -348,9 +348,9 @@ iget:
                }
 
                /* Prevent the inode from being unlinked during OI scrub. */
-               cfs_mutex_lock(&inode->i_mutex);
+               mutex_lock(&inode->i_mutex);
                if (unlikely(inode->i_nlink == 0)) {
-                       cfs_mutex_unlock(&inode->i_mutex);
+                       mutex_unlock(&inode->i_mutex);
                        iput(inode);
                        GOTO(out, rc = 0);
                }
@@ -416,17 +416,17 @@ out:
        }
 
        if (ops == DTO_INDEX_INSERT) {
-               cfs_mutex_unlock(&inode->i_mutex);
+               mutex_unlock(&inode->i_mutex);
                iput(inode);
        }
-       cfs_up_write(&scrub->os_rwsem);
+       up_write(&scrub->os_rwsem);
 
        if (oii != NULL) {
                LASSERT(!cfs_list_empty(&oii->oii_list));
 
-               cfs_spin_lock(&scrub->os_lock);
+               spin_lock(&scrub->os_lock);
                cfs_list_del_init(&oii->oii_list);
-               cfs_spin_unlock(&scrub->os_lock);
+               spin_unlock(&scrub->os_lock);
                OBD_FREE_PTR(oii);
        }
        RETURN(sf->sf_param & SP_FAILOUT ? rc : 0);
@@ -438,7 +438,7 @@ static int do_osd_scrub_checkpoint(struct osd_scrub *scrub)
        int                rc;
        ENTRY;
 
-       cfs_down_write(&scrub->os_rwsem);
+       down_write(&scrub->os_rwsem);
        sf->sf_items_checked += scrub->os_new_checked;
        scrub->os_new_checked = 0;
        sf->sf_pos_last_checkpoint = scrub->os_pos_current;
@@ -446,7 +446,7 @@ static int do_osd_scrub_checkpoint(struct osd_scrub *scrub)
        sf->sf_run_time += cfs_duration_sec(cfs_time_current() + HALF_SEC -
                                            scrub->os_time_last_checkpoint);
        rc = osd_scrub_file_store(scrub);
-       cfs_up_write(&scrub->os_rwsem);
+       up_write(&scrub->os_rwsem);
 
        RETURN(rc);
 }
@@ -465,10 +465,10 @@ static void osd_scrub_post(struct osd_scrub *scrub, int result)
        struct scrub_file *sf = &scrub->os_file;
        ENTRY;
 
-       cfs_down_write(&scrub->os_rwsem);
-       cfs_spin_lock(&scrub->os_lock);
+       down_write(&scrub->os_rwsem);
+       spin_lock(&scrub->os_lock);
        thread_set_flags(&scrub->os_thread, SVC_STOPPING);
-       cfs_spin_unlock(&scrub->os_lock);
+       spin_unlock(&scrub->os_lock);
        if (scrub->os_new_checked > 0) {
                sf->sf_items_checked += scrub->os_new_checked;
                scrub->os_new_checked = 0;
@@ -496,7 +496,7 @@ static void osd_scrub_post(struct osd_scrub *scrub, int result)
                CERROR("%.16s: fail to osd_scrub_post, rc = %d\n",
                       LDISKFS_SB(osd_scrub2sb(scrub))->s_es->s_volume_name,
                       result);
-       cfs_up_write(&scrub->os_rwsem);
+       up_write(&scrub->os_rwsem);
 
        EXIT;
 }
@@ -596,9 +596,9 @@ static int osd_scrub_next(struct osd_thread_info *info, struct osd_device *dev,
        }
 
        if (OBD_FAIL_CHECK(OBD_FAIL_OSD_SCRUB_CRASH)) {
-               cfs_spin_lock(&scrub->os_lock);
+               spin_lock(&scrub->os_lock);
                thread_set_flags(thread, SVC_STOPPING);
-               cfs_spin_unlock(&scrub->os_lock);
+               spin_unlock(&scrub->os_lock);
                return SCRUB_NEXT_CRASH;
        }
 
@@ -703,10 +703,10 @@ static int osd_scrub_exec(struct osd_thread_info *info, struct osd_device *dev,
        }
 
        if (items != NULL) {
-               cfs_down_write(&scrub->os_rwsem);
+               down_write(&scrub->os_rwsem);
                scrub->os_new_checked++;
                (*items)++;
-               cfs_up_write(&scrub->os_rwsem);
+               up_write(&scrub->os_rwsem);
                goto next;
        }
 
@@ -925,10 +925,10 @@ out:
        lu_env_fini(&env);
 
 noenv:
-       cfs_spin_lock(&scrub->os_lock);
+       spin_lock(&scrub->os_lock);
        thread_set_flags(thread, SVC_STOPPED);
        cfs_waitq_broadcast(&thread->t_ctl_waitq);
-       cfs_spin_unlock(&scrub->os_lock);
+       spin_unlock(&scrub->os_lock);
        return rc;
 }
 
@@ -942,18 +942,18 @@ static int do_osd_scrub_start(struct osd_device *dev, __u32 flags)
 
 again:
        /* os_lock: sync status between stop and scrub thread */
-       cfs_spin_lock(&scrub->os_lock);
+       spin_lock(&scrub->os_lock);
        if (thread_is_running(thread)) {
-               cfs_spin_unlock(&scrub->os_lock);
+               spin_unlock(&scrub->os_lock);
                RETURN(-EALREADY);
        } else if (unlikely(thread_is_stopping(thread))) {
-               cfs_spin_unlock(&scrub->os_lock);
+               spin_unlock(&scrub->os_lock);
                l_wait_event(thread->t_ctl_waitq,
                             thread_is_stopped(thread),
                             &lwi);
                goto again;
        }
-       cfs_spin_unlock(&scrub->os_lock);
+       spin_unlock(&scrub->os_lock);
 
        if (scrub->os_file.sf_status == SS_COMPLETED)
                flags |= SS_RESET;
@@ -980,9 +980,9 @@ int osd_scrub_start(struct osd_device *dev)
        ENTRY;
 
        /* od_otable_mutex: prevent concurrent start/stop */
-       cfs_mutex_lock(&dev->od_otable_mutex);
+       mutex_lock(&dev->od_otable_mutex);
        rc = do_osd_scrub_start(dev, SS_AUTO);
-       cfs_mutex_unlock(&dev->od_otable_mutex);
+       mutex_unlock(&dev->od_otable_mutex);
 
        RETURN(rc == -EALREADY ? 0 : rc);
 }
@@ -993,28 +993,28 @@ static void do_osd_scrub_stop(struct osd_scrub *scrub)
        struct l_wait_info    lwi    = { 0 };
 
        /* os_lock: sync status between stop and scrub thread */
-       cfs_spin_lock(&scrub->os_lock);
+       spin_lock(&scrub->os_lock);
        if (!thread_is_init(thread) && !thread_is_stopped(thread)) {
                thread_set_flags(thread, SVC_STOPPING);
-               cfs_spin_unlock(&scrub->os_lock);
+               spin_unlock(&scrub->os_lock);
                cfs_waitq_broadcast(&thread->t_ctl_waitq);
                l_wait_event(thread->t_ctl_waitq,
                             thread_is_stopped(thread),
                             &lwi);
                /* Do not skip the last lock/unlock, which guarantees that
                 * the caller cannot return until the OI scrub thread exits. */
-               cfs_spin_lock(&scrub->os_lock);
+               spin_lock(&scrub->os_lock);
        }
-       cfs_spin_unlock(&scrub->os_lock);
+       spin_unlock(&scrub->os_lock);
 }
 
 static void osd_scrub_stop(struct osd_device *dev)
 {
        /* od_otable_mutex: prevent concurrent start/stop */
-       cfs_mutex_lock(&dev->od_otable_mutex);
+       mutex_lock(&dev->od_otable_mutex);
        dev->od_scrub.os_paused = 1;
        do_osd_scrub_stop(&dev->od_scrub);
-       cfs_mutex_unlock(&dev->od_otable_mutex);
+       mutex_unlock(&dev->od_otable_mutex);
 }
 
 static const char osd_scrub_name[] = "OI_scrub";
@@ -1043,8 +1043,8 @@ int osd_scrub_setup(const struct lu_env *env, struct osd_device *dev)
        ctxt->fs = get_ds();
 
        cfs_waitq_init(&scrub->os_thread.t_ctl_waitq);
-       cfs_init_rwsem(&scrub->os_rwsem);
-       cfs_spin_lock_init(&scrub->os_lock);
+       init_rwsem(&scrub->os_rwsem);
+       spin_lock_init(&scrub->os_lock);
        CFS_INIT_LIST_HEAD(&scrub->os_inconsistent_items);
 
        push_ctxt(&saved, ctxt, NULL);
@@ -1149,7 +1149,7 @@ static struct dt_it *osd_otable_it_init(const struct lu_env *env,
        ENTRY;
 
        /* od_otable_mutex: prevent concurrent init/fini */
-       cfs_mutex_lock(&dev->od_otable_mutex);
+       mutex_lock(&dev->od_otable_mutex);
        if (dev->od_otable_it != NULL)
                GOTO(out, it = ERR_PTR(-EALREADY));
 
@@ -1187,7 +1187,7 @@ static struct dt_it *osd_otable_it_init(const struct lu_env *env,
        GOTO(out, it);
 
 out:
-       cfs_mutex_unlock(&dev->od_otable_mutex);
+       mutex_unlock(&dev->od_otable_mutex);
        return (struct dt_it *)it;
 }
 
@@ -1197,12 +1197,12 @@ static void osd_otable_it_fini(const struct lu_env *env, struct dt_it *di)
        struct osd_device    *dev = it->ooi_dev;
 
        /* od_otable_mutex: prevent concurrent init/fini */
-       cfs_mutex_lock(&dev->od_otable_mutex);
+       mutex_lock(&dev->od_otable_mutex);
        do_osd_scrub_stop(&dev->od_scrub);
        LASSERT(dev->od_otable_it == it);
 
        dev->od_otable_it = NULL;
-       cfs_mutex_unlock(&dev->od_otable_mutex);
+       mutex_unlock(&dev->od_otable_mutex);
        OBD_FREE_PTR(it);
 }
 
@@ -1214,9 +1214,9 @@ static void osd_otable_it_put(const struct lu_env *env, struct dt_it *di)
        struct osd_device *dev = ((struct osd_otable_it *)di)->ooi_dev;
 
        /* od_otable_mutex: prevent concurrent init/fini */
-       cfs_mutex_lock(&dev->od_otable_mutex);
+       mutex_lock(&dev->od_otable_mutex);
        dev->od_scrub.os_paused = 1;
-       cfs_mutex_unlock(&dev->od_otable_mutex);
+       mutex_unlock(&dev->od_otable_mutex);
 }
 
 /**
@@ -1405,9 +1405,9 @@ int osd_oii_insert(struct osd_device *dev, struct osd_idmap_cache *oic,
        oii->oii_cache = *oic;
        oii->oii_insert = insert;
 
-       cfs_spin_lock(&scrub->os_lock);
+       spin_lock(&scrub->os_lock);
        if (unlikely(!thread_is_running(thread))) {
-               cfs_spin_unlock(&scrub->os_lock);
+               spin_unlock(&scrub->os_lock);
                OBD_FREE_PTR(oii);
                RETURN(-EAGAIN);
        }
@@ -1415,7 +1415,7 @@ int osd_oii_insert(struct osd_device *dev, struct osd_idmap_cache *oic,
        if (cfs_list_empty(&scrub->os_inconsistent_items))
                wakeup = 1;
        cfs_list_add_tail(&oii->oii_list, &scrub->os_inconsistent_items);
-       cfs_spin_unlock(&scrub->os_lock);
+       spin_unlock(&scrub->os_lock);
 
        if (wakeup != 0)
                cfs_waitq_broadcast(&thread->t_ctl_waitq);
@@ -1430,15 +1430,15 @@ int osd_oii_lookup(struct osd_device *dev, const struct lu_fid *fid,
        struct osd_inconsistent_item *oii;
        ENTRY;
 
-       cfs_spin_lock(&scrub->os_lock);
+       spin_lock(&scrub->os_lock);
        cfs_list_for_each_entry(oii, &scrub->os_inconsistent_items, oii_list) {
                if (lu_fid_eq(fid, &oii->oii_cache.oic_fid)) {
                        *id = oii->oii_cache.oic_lid;
-                       cfs_spin_unlock(&scrub->os_lock);
+                       spin_unlock(&scrub->os_lock);
                        RETURN(0);
                }
        }
-       cfs_spin_unlock(&scrub->os_lock);
+       spin_unlock(&scrub->os_lock);
 
        RETURN(-ENOENT);
 }
@@ -1538,7 +1538,7 @@ int osd_scrub_dump(struct osd_device *dev, char *buf, int len)
        int                ret     = -ENOSPC;
        int                rc;
 
-       cfs_down_read(&scrub->os_rwsem);
+       down_read(&scrub->os_rwsem);
        rc = snprintf(buf, len,
                      "name: OI scrub\n"
                      "magic: 0x%x\n"
@@ -1644,6 +1644,6 @@ int osd_scrub_dump(struct osd_device *dev, char *buf, int len)
        ret = save - len;
 
 out:
-       cfs_up_read(&scrub->os_rwsem);
+       up_read(&scrub->os_rwsem);
        return ret;
 }
index b07928e..187c014 100644 (file)
@@ -172,8 +172,8 @@ struct osd_scrub {
 
        /* write lock for scrub prep/update/post/checkpoint,
         * read lock for scrub dump. */
-       cfs_rw_semaphore_t      os_rwsem;
-       cfs_spinlock_t          os_lock;
+       struct rw_semaphore     os_rwsem;
+       spinlock_t              os_lock;
 
        /* Scrub file in memory. */
        struct scrub_file       os_file;
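
The two fields above capture the whole conversion in miniature: cfs_rw_semaphore_t
and cfs_spinlock_t were one-to-one wrappers, so only the type names and the init
calls change. As a reference point, here is a minimal, self-contained sketch of
the same locking split (illustrative names, not Lustre code): a rw_semaphore
serializing scrub updates against dumpers, plus a spinlock for short
non-sleeping sections.

#include <linux/rwsem.h>
#include <linux/spinlock.h>

struct scrub_demo {
	struct rw_semaphore	rwsem;	/* write: prep/update/post, read: dump */
	spinlock_t		lock;	/* protects the counter below */
	unsigned int		items;
};

static void scrub_demo_init(struct scrub_demo *s)
{
	init_rwsem(&s->rwsem);		/* was cfs_init_rwsem() */
	spin_lock_init(&s->lock);	/* was cfs_spin_lock_init() */
	s->items = 0;
}

static void scrub_demo_checkpoint(struct scrub_demo *s)
{
	down_write(&s->rwsem);		/* excludes all concurrent dumpers */
	/* ... write out the persistent scrub state here ... */
	up_write(&s->rwsem);
}

static unsigned int scrub_demo_dump(struct scrub_demo *s)
{
	unsigned int n;

	down_read(&s->rwsem);		/* dumpers may run in parallel */
	spin_lock(&s->lock);		/* short, non-sleeping section */
	n = s->items;
	spin_unlock(&s->lock);
	up_read(&s->rwsem);
	return n;
}
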
index 5a345d6..22d3c9b 100644 (file)
@@ -299,7 +299,7 @@ static struct thandle *osd_trans_create(const struct lu_env *env,
        oh->ot_tx = tx;
        CFS_INIT_LIST_HEAD(&oh->ot_dcb_list);
        CFS_INIT_LIST_HEAD(&oh->ot_sa_list);
-       cfs_sema_init(&oh->ot_sa_lock, 1);
+       sema_init(&oh->ot_sa_lock, 1);
        memset(&oh->ot_quota_trans, 0, sizeof(oh->ot_quota_trans));
        th = &oh->ot_super;
        th->th_dev = dt;
@@ -789,9 +789,9 @@ static int osd_obd_connect(const struct lu_env *env, struct obd_export **exp,
 
        *exp = class_conn2export(&conn);
 
-       cfs_spin_lock(&osd->od_objset.lock);
+       spin_lock(&osd->od_objset.lock);
        osd->od_connects++;
-       cfs_spin_unlock(&osd->od_objset.lock);
+       spin_unlock(&osd->od_objset.lock);
 
        RETURN(0);
 }
@@ -808,11 +808,11 @@ static int osd_obd_disconnect(struct obd_export *exp)
        ENTRY;
 
        /* Only disconnect the underlying layers on the final disconnect. */
-       cfs_spin_lock(&osd->od_objset.lock);
+       spin_lock(&osd->od_objset.lock);
        osd->od_connects--;
        if (osd->od_connects == 0)
                release = 1;
-       cfs_spin_unlock(&osd->od_objset.lock);
+       spin_unlock(&osd->od_objset.lock);
 
        rc = class_disconnect(exp); /* bz 9811 */
 
index e6c21d8..0db3730 100644 (file)
@@ -169,7 +169,7 @@ struct osd_thandle {
        struct thandle           ot_super;
        cfs_list_t               ot_dcb_list;
        cfs_list_t               ot_sa_list;
-       cfs_semaphore_t          ot_sa_lock;
+       struct semaphore         ot_sa_lock;
        dmu_tx_t                *ot_tx;
        struct lquota_trans      ot_quota_trans;
        __u32                    ot_write_commit:1,
@@ -255,14 +255,14 @@ struct osd_object {
        nvlist_t                *oo_sa_xattr;
        cfs_list_t               oo_sa_linkage;
 
-       cfs_rw_semaphore_t       oo_sem;
+       struct rw_semaphore      oo_sem;
 
        /* cached attributes */
-       cfs_rwlock_t             oo_attr_lock;
+       rwlock_t                 oo_attr_lock;
        struct lu_attr           oo_attr;
 
        /* protects extended attributes */
-       cfs_semaphore_t          oo_guard;
+       struct semaphore         oo_guard;
        uint64_t                 oo_xattr;
 
        /* record size for index file */
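
Note that oo_guard and ot_sa_lock stay counting semaphores initialized to one
rather than becoming struct mutex: cfs_sema_init()/cfs_down()/cfs_up() map
one-for-one onto sema_init()/down()/up(), as the osd_xattr_*() hunks further
down show. A hedged sketch of that binary-semaphore guard (illustrative names);
one caveat worth remembering is that a semaphore, unlike a mutex, has no owner,
so lockdep cannot validate it.

#include <linux/semaphore.h>

struct obj_demo {
	struct semaphore	guard;	/* binary: serializes xattr updates */
	int			nxattrs;
};

static void obj_demo_init(struct obj_demo *o)
{
	sema_init(&o->guard, 1);	/* count of 1 => mutual exclusion */
	o->nxattrs = 0;
}

static void obj_demo_xattr_add(struct obj_demo *o)
{
	down(&o->guard);		/* may sleep; was cfs_down() */
	o->nxattrs++;
	up(&o->guard);			/* was cfs_up() */
}
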
index 8825892..c50c397 100644 (file)
@@ -86,9 +86,9 @@ static ssize_t osd_read(const struct lu_env *env, struct dt_object *dt,
        LASSERT(dt_object_exists(dt));
        LASSERT(obj->oo_db);
 
-       cfs_read_lock(&obj->oo_attr_lock);
+       read_lock(&obj->oo_attr_lock);
        old_size = obj->oo_attr.la_size;
-       cfs_read_unlock(&obj->oo_attr_lock);
+       read_unlock(&obj->oo_attr_lock);
 
        if (*pos + size > old_size) {
                if (old_size < *pos)
@@ -172,10 +172,10 @@ static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
 
        dmu_write(osd->od_objset.os, obj->oo_db->db_object, offset,
                (uint64_t)buf->lb_len, buf->lb_buf, oh->ot_tx);
-       cfs_write_lock(&obj->oo_attr_lock);
+       write_lock(&obj->oo_attr_lock);
        if (obj->oo_attr.la_size < offset + buf->lb_len) {
                obj->oo_attr.la_size = offset + buf->lb_len;
-               cfs_write_unlock(&obj->oo_attr_lock);
+               write_unlock(&obj->oo_attr_lock);
                /* osd_object_sa_update() will be copying directly from oo_attr
                 * into dbuf.  Any update within a single txg will copy the
                 * most recent value. */
@@ -184,7 +184,7 @@ static ssize_t osd_write(const struct lu_env *env, struct dt_object *dt,
                if (unlikely(rc))
                        GOTO(out, rc);
        } else {
-               cfs_write_unlock(&obj->oo_attr_lock);
+               write_unlock(&obj->oo_attr_lock);
        }
 
        *pos += buf->lb_len;
@@ -692,17 +692,17 @@ static int osd_write_commit(const struct lu_env *env, struct dt_object *dt,
                RETURN(0);
        }
 
-       cfs_write_lock(&obj->oo_attr_lock);
+       write_lock(&obj->oo_attr_lock);
        if (obj->oo_attr.la_size < new_size) {
                obj->oo_attr.la_size = new_size;
-               cfs_write_unlock(&obj->oo_attr_lock);
+               write_unlock(&obj->oo_attr_lock);
                /* osd_object_sa_update() will be copying directly from
                 * oo_attr into dbuf. Any update within a single txg will copy
                 * the most recent value. */
                rc = osd_object_sa_update(obj, SA_ZPL_SIZE(uos),
                                        &obj->oo_attr.la_size, 8, oh);
        } else {
-               cfs_write_unlock(&obj->oo_attr_lock);
+               write_unlock(&obj->oo_attr_lock);
        }
 
        RETURN(rc);
@@ -793,21 +793,21 @@ static int osd_punch(const struct lu_env *env, struct dt_object *dt,
        LASSERT(th != NULL);
        oh = container_of0(th, struct osd_thandle, ot_super);
 
-       cfs_write_lock(&obj->oo_attr_lock);
+       write_lock(&obj->oo_attr_lock);
        /* truncate */
        if (end == OBD_OBJECT_EOF || end >= obj->oo_attr.la_size)
                len = DMU_OBJECT_END;
        else
                len = end - start;
-       cfs_write_unlock(&obj->oo_attr_lock);
+       write_unlock(&obj->oo_attr_lock);
 
        rc = __osd_object_punch(osd->od_objset.os, obj->oo_db, oh->ot_tx,
                                obj->oo_attr.la_size, start, len);
        /* set new size */
        if (len == DMU_OBJECT_END) {
-               cfs_write_lock(&obj->oo_attr_lock);
+               write_lock(&obj->oo_attr_lock);
                obj->oo_attr.la_size = start;
-               cfs_write_unlock(&obj->oo_attr_lock);
+               write_unlock(&obj->oo_attr_lock);
                rc = osd_object_sa_update(obj, SA_ZPL_SIZE(uos),
                                        &obj->oo_attr.la_size, 8, oh);
        }
@@ -825,7 +825,7 @@ static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
 
        oh = container_of0(handle, struct osd_thandle, ot_super);
 
-       cfs_read_lock(&obj->oo_attr_lock);
+       read_lock(&obj->oo_attr_lock);
        if (end == OBD_OBJECT_EOF || end >= obj->oo_attr.la_size)
                len = DMU_OBJECT_END;
        else
@@ -833,10 +833,10 @@ static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
 
        /* declare we'll free some blocks ... */
        if (start < obj->oo_attr.la_size) {
-               cfs_read_unlock(&obj->oo_attr_lock);
+               read_unlock(&obj->oo_attr_lock);
                dmu_tx_hold_free(oh->ot_tx, obj->oo_db->db_object, start, len);
        } else {
-               cfs_read_unlock(&obj->oo_attr_lock);
+               read_unlock(&obj->oo_attr_lock);
        }
 
        /* ... and we'll modify size attribute */
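
Every oo_attr_lock hunk above follows the same shape: hold the rwlock only long
enough to read or update the cached attribute, then do the slow SA update
outside the writer section. A minimal sketch of that pattern, assuming an
illustrative cached-size field (not Lustre code):

#include <linux/spinlock.h>	/* rwlock_t is provided here as well */
#include <linux/types.h>

struct attr_demo {
	rwlock_t	attr_lock;	/* protects the cached size */
	loff_t		size;
};

static void attr_demo_init(struct attr_demo *a)
{
	rwlock_init(&a->attr_lock);	/* was cfs_rwlock_init() */
	a->size = 0;
}

static loff_t attr_demo_get_size(struct attr_demo *a)
{
	loff_t size;

	read_lock(&a->attr_lock);	/* readers may run concurrently */
	size = a->size;
	read_unlock(&a->attr_lock);
	return size;
}

static bool attr_demo_grow(struct attr_demo *a, loff_t new_size)
{
	bool grew = false;

	write_lock(&a->attr_lock);
	if (a->size < new_size) {
		a->size = new_size;
		grew = true;
	}
	write_unlock(&a->attr_lock);
	/* the slow on-disk update would happen here, outside the lock */
	return grew;
}
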
index bb10a1f..a619f2c 100644 (file)
@@ -124,12 +124,12 @@ osd_object_sa_dirty_add(struct osd_object *obj, struct osd_thandle *oh)
        if (!cfs_list_empty(&obj->oo_sa_linkage))
                return;
 
-       cfs_down(&oh->ot_sa_lock);
-       cfs_write_lock(&obj->oo_attr_lock);
+       down(&oh->ot_sa_lock);
+       write_lock(&obj->oo_attr_lock);
        if (likely(cfs_list_empty(&obj->oo_sa_linkage)))
                cfs_list_add(&obj->oo_sa_linkage, &oh->ot_sa_list);
-       cfs_write_unlock(&obj->oo_attr_lock);
-       cfs_up(&oh->ot_sa_lock);
+       write_unlock(&obj->oo_attr_lock);
+       up(&oh->ot_sa_lock);
 }
 
 /*
@@ -139,16 +139,16 @@ void osd_object_sa_dirty_rele(struct osd_thandle *oh)
 {
        struct osd_object *obj;
 
-       cfs_down(&oh->ot_sa_lock);
+       down(&oh->ot_sa_lock);
        while (!cfs_list_empty(&oh->ot_sa_list)) {
                obj = cfs_list_entry(oh->ot_sa_list.next,
                                     struct osd_object, oo_sa_linkage);
                sa_spill_rele(obj->oo_sa_hdl);
-               cfs_write_lock(&obj->oo_attr_lock);
+               write_lock(&obj->oo_attr_lock);
                cfs_list_del_init(&obj->oo_sa_linkage);
-               cfs_write_unlock(&obj->oo_attr_lock);
+               write_unlock(&obj->oo_attr_lock);
        }
-       cfs_up(&oh->ot_sa_lock);
+       up(&oh->ot_sa_lock);
 }
 
 /*
@@ -299,9 +299,9 @@ struct lu_object *osd_object_alloc(const struct lu_env *env,
                mo->oo_dt.do_ops = &osd_obj_ops;
                l->lo_ops = &osd_lu_obj_ops;
                CFS_INIT_LIST_HEAD(&mo->oo_sa_linkage);
-               cfs_init_rwsem(&mo->oo_sem);
-               cfs_sema_init(&mo->oo_guard, 1);
-               cfs_rwlock_init(&mo->oo_attr_lock);
+               init_rwsem(&mo->oo_sem);
+               sema_init(&mo->oo_guard, 1);
+               rwlock_init(&mo->oo_attr_lock);
                return l;
        } else {
                return NULL;
@@ -500,9 +500,9 @@ static int osd_declare_object_destroy(const struct lu_env *env,
 int __osd_object_free(udmu_objset_t *uos, uint64_t oid, dmu_tx_t *tx)
 {
        LASSERT(uos->objects != 0);
-       cfs_spin_lock(&uos->lock);
+       spin_lock(&uos->lock);
        uos->objects--;
-       cfs_spin_unlock(&uos->lock);
+       spin_unlock(&uos->lock);
 
        return -dmu_object_free(uos->os, oid, tx);
 }
@@ -668,7 +668,7 @@ static void osd_object_read_lock(const struct lu_env *env,
 
        LASSERT(osd_invariant(obj));
 
-       cfs_down_read(&obj->oo_sem);
+       down_read(&obj->oo_sem);
 }
 
 static void osd_object_write_lock(const struct lu_env *env,
@@ -678,7 +678,7 @@ static void osd_object_write_lock(const struct lu_env *env,
 
        LASSERT(osd_invariant(obj));
 
-       cfs_down_write(&obj->oo_sem);
+       down_write(&obj->oo_sem);
 }
 
 static void osd_object_read_unlock(const struct lu_env *env,
@@ -687,7 +687,7 @@ static void osd_object_read_unlock(const struct lu_env *env,
        struct osd_object *obj = osd_dt_obj(dt);
 
        LASSERT(osd_invariant(obj));
-       cfs_up_read(&obj->oo_sem);
+       up_read(&obj->oo_sem);
 }
 
 static void osd_object_write_unlock(const struct lu_env *env,
@@ -696,7 +696,7 @@ static void osd_object_write_unlock(const struct lu_env *env,
         struct osd_object *obj = osd_dt_obj(dt);
 
         LASSERT(osd_invariant(obj));
-        cfs_up_write(&obj->oo_sem);
+       up_write(&obj->oo_sem);
 }
 
 static int osd_object_write_locked(const struct lu_env *env,
@@ -707,9 +707,9 @@ static int osd_object_write_locked(const struct lu_env *env,
 
        LASSERT(osd_invariant(obj));
 
-       if (cfs_down_write_trylock(&obj->oo_sem)) {
+       if (down_write_trylock(&obj->oo_sem)) {
                rc = 0;
-               cfs_up_write(&obj->oo_sem);
+               up_write(&obj->oo_sem);
        }
        return rc;
 }
@@ -727,9 +727,9 @@ static int osd_attr_get(const struct lu_env *env,
        LASSERT(osd_invariant(obj));
        LASSERT(obj->oo_db);
 
-       cfs_read_lock(&obj->oo_attr_lock);
+       read_lock(&obj->oo_attr_lock);
        *attr = obj->oo_attr;
-       cfs_read_unlock(&obj->oo_attr_lock);
+       read_unlock(&obj->oo_attr_lock);
 
        /* with ZFS_DEBUG zrl_add_debug() called by DB_DNODE_ENTER()
         * from within sa_object_size() can block on a mutex, so
@@ -944,7 +944,7 @@ static int osd_attr_set(const struct lu_env *env, struct dt_object *dt,
                                obj->oo_attr.la_gid, rc);
        }
 
-       cfs_write_lock(&obj->oo_attr_lock);
+       write_lock(&obj->oo_attr_lock);
        cnt = 0;
        if (la->la_valid & LA_ATIME) {
                osa->atime[0] = obj->oo_attr.la_atime = la->la_atime;
@@ -1000,7 +1000,7 @@ static int osd_attr_set(const struct lu_env *env, struct dt_object *dt,
                                 &osa->gid, 8);
        }
        obj->oo_attr.la_valid |= la->la_valid;
-       cfs_write_unlock(&obj->oo_attr_lock);
+       write_unlock(&obj->oo_attr_lock);
 
        rc = osd_object_sa_bulk_update(obj, bulk, cnt, oh);
 
@@ -1182,9 +1182,9 @@ int __osd_object_create(const struct lu_env *env, udmu_objset_t *uos,
        int      rc;
 
        LASSERT(tag);
-       cfs_spin_lock(&uos->lock);
+       spin_lock(&uos->lock);
        uos->objects++;
-       cfs_spin_unlock(&uos->lock);
+       spin_unlock(&uos->lock);
 
        /* Assert that the transaction has been assigned to a
           transaction group. */
@@ -1223,9 +1223,9 @@ int __osd_zap_create(const struct lu_env *env, udmu_objset_t *uos,
 
        LASSERT(tag);
 
-       cfs_spin_lock(&uos->lock);
+       spin_lock(&uos->lock);
        uos->objects++;
-       cfs_spin_unlock(&uos->lock);
+       spin_unlock(&uos->lock);
 
        /* Assert that the transaction has been assigned to a
           transaction group. */
@@ -1415,7 +1415,7 @@ static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
        /* concurrent create declarations should not see
         * the object in an inconsistent state (db, attr, etc.);
         * in regular cases acquisition should be cheap */
-       cfs_down(&obj->oo_guard);
+       down(&obj->oo_guard);
 
        LASSERT(osd_invariant(obj));
        LASSERT(!dt_object_exists(dt));
@@ -1474,7 +1474,7 @@ static int osd_object_create(const struct lu_env *env, struct dt_object *dt,
        }
 
 out:
-       cfs_up(&obj->oo_guard);
+       up(&obj->oo_guard);
        RETURN(rc);
 }
 
@@ -1507,9 +1507,9 @@ static int osd_object_ref_add(const struct lu_env *env,
 
        oh = container_of0(handle, struct osd_thandle, ot_super);
 
-       cfs_write_lock(&obj->oo_attr_lock);
+       write_lock(&obj->oo_attr_lock);
        nlink = ++obj->oo_attr.la_nlink;
-       cfs_write_unlock(&obj->oo_attr_lock);
+       write_unlock(&obj->oo_attr_lock);
 
        rc = osd_object_sa_update(obj, SA_ZPL_LINKS(uos), &nlink, 8, oh);
        return rc;
@@ -1545,9 +1545,9 @@ static int osd_object_ref_del(const struct lu_env *env,
        oh = container_of0(handle, struct osd_thandle, ot_super);
        LASSERT(!lu_object_is_dying(dt->do_lu.lo_header));
 
-       cfs_write_lock(&obj->oo_attr_lock);
+       write_lock(&obj->oo_attr_lock);
        nlink = --obj->oo_attr.la_nlink;
-       cfs_write_unlock(&obj->oo_attr_lock);
+       write_unlock(&obj->oo_attr_lock);
 
        rc = osd_object_sa_update(obj, SA_ZPL_LINKS(uos), &nlink, 8, oh);
        return rc;
@@ -1571,14 +1571,14 @@ static int capa_is_sane(const struct lu_env *env, struct osd_device *dev,
                RETURN(rc);
        }
 
-       cfs_spin_lock(&capa_lock);
+       spin_lock(&capa_lock);
        for (i = 0; i < 2; i++) {
                if (keys[i].lk_keyid == capa->lc_keyid) {
                        oti->oti_capa_key = keys[i];
                        break;
                }
        }
-       cfs_spin_unlock(&capa_lock);
+       spin_unlock(&capa_lock);
 
        if (i == 2) {
                DEBUG_CAPA(D_ERROR, capa, "no matched capa key");
@@ -1674,9 +1674,9 @@ static struct obd_capa *osd_capa_get(const struct lu_env *env,
                RETURN(oc);
        }
 
-       cfs_spin_lock(&capa_lock);
+       spin_lock(&capa_lock);
        *key = dev->od_capa_keys[1];
-       cfs_spin_unlock(&capa_lock);
+       spin_unlock(&capa_lock);
 
        capa->lc_keyid = key->lk_keyid;
        capa->lc_expiry = cfs_time_current_sec() + dev->od_capa_timeout;
index 94372f2..10033c0 100644 (file)
@@ -234,9 +234,9 @@ int osd_xattr_get(const struct lu_env *env, struct dt_object *dt,
        LASSERT(osd_invariant(obj));
        LASSERT(dt_object_exists(dt));
 
-       cfs_down(&obj->oo_guard);
+       down(&obj->oo_guard);
        rc = __osd_xattr_get(env, obj, buf, name, &size);
-       cfs_up(&obj->oo_guard);
+       up(&obj->oo_guard);
 
        if (rc == -ENOENT)
                rc = -ENODATA;
@@ -321,9 +321,9 @@ int osd_declare_xattr_set(const struct lu_env *env, struct dt_object *dt,
        LASSERT(handle != NULL);
        oh = container_of0(handle, struct osd_thandle, ot_super);
 
-       cfs_down(&obj->oo_guard);
+       down(&obj->oo_guard);
        __osd_xattr_declare_set(env, obj, buf->lb_len, name, oh);
-       cfs_up(&obj->oo_guard);
+       up(&obj->oo_guard);
 
        RETURN(0);
 }
@@ -564,11 +564,11 @@ int osd_xattr_set(const struct lu_env *env, struct dt_object *dt,
 
        oh = container_of0(handle, struct osd_thandle, ot_super);
 
-       cfs_down(&obj->oo_guard);
+       down(&obj->oo_guard);
        CDEBUG(D_INODE, "Setting xattr %s with size %d\n",
                name, (int)buf->lb_len);
        rc = osd_xattr_set_internal(env, obj, buf, name, fl, oh, capa);
-       cfs_up(&obj->oo_guard);
+       up(&obj->oo_guard);
 
        RETURN(rc);
 }
@@ -625,9 +625,9 @@ int osd_declare_xattr_del(const struct lu_env *env, struct dt_object *dt,
        LASSERT(oh->ot_tx != NULL);
        LASSERT(obj->oo_db != NULL);
 
-       cfs_down(&obj->oo_guard);
+       down(&obj->oo_guard);
        __osd_xattr_declare_del(env, obj, name, oh);
-       cfs_up(&obj->oo_guard);
+       up(&obj->oo_guard);
 
        RETURN(0);
 }
@@ -700,9 +700,9 @@ int osd_xattr_del(const struct lu_env *env, struct dt_object *dt,
        oh = container_of0(handle, struct osd_thandle, ot_super);
        LASSERT(oh->ot_tx != NULL);
 
-       cfs_down(&obj->oo_guard);
+       down(&obj->oo_guard);
        rc = __osd_xattr_del(env, obj, name, oh);
-       cfs_up(&obj->oo_guard);
+       up(&obj->oo_guard);
 
        RETURN(rc);
 }
@@ -755,7 +755,7 @@ int osd_xattr_list(const struct lu_env *env, struct dt_object *dt,
        LASSERT(osd_invariant(obj));
        LASSERT(dt_object_exists(dt));
 
-       cfs_down(&obj->oo_guard);
+       down(&obj->oo_guard);
 
        rc = osd_sa_xattr_list(env, obj, lb);
        if (rc < 0)
@@ -795,7 +795,7 @@ int osd_xattr_list(const struct lu_env *env, struct dt_object *dt,
 out_fini:
        udmu_zap_cursor_fini(zc);
 out:
-       cfs_up(&obj->oo_guard);
+       up(&obj->oo_guard);
        RETURN(rc);
 
 }
index 1bb5d2a..ece8e7e 100644 (file)
@@ -149,7 +149,7 @@ int udmu_objset_open(char *osname, udmu_objset_t *uos)
        dmu_objset_space(uos->os, &refdbytes, &availbytes, &usedobjs,
                         &availobjs);
        uos->objects = usedobjs;
-       cfs_spin_lock_init(&uos->lock);
+       spin_lock_init(&uos->lock);
 
 out:
        if (error && uos->os != NULL)
index 45487b6..876d8eb 100644 (file)
@@ -55,7 +55,7 @@
 typedef struct udmu_objset {
        struct objset   *os;
        uint64_t        root;  /* id of root znode */
-       cfs_spinlock_t  lock;  /* protects objects below */
+       spinlock_t      lock;  /* protects objects below */
        uint64_t        objects; /* in-core counter of objects */
        /* SA attr mapping->id,
         * the name is the same as in ZFS so the SA_ZPL_... defines apply */
index c86efd8..823e77b 100644 (file)
@@ -202,9 +202,9 @@ int osp_disconnect(struct osp_device *d)
         * of the cleanup RPCs fails (e.g. ldlm cancel, etc).  We don't
         * fully deactivate the import, as that would drop all requests. */
        LASSERT(imp != NULL);
-       cfs_spin_lock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
        imp->imp_deactive = 1;
-       cfs_spin_unlock(&imp->imp_lock);
+       spin_unlock(&imp->imp_lock);
 
        ptlrpc_deactivate_import(imp);
 
@@ -333,10 +333,10 @@ static int osp_statfs(const struct lu_env *env, struct dt_device *dev,
         * layer above osp (usually lod) can use ffree to estimate
         * how many objects are available for immediate creation
         */
-       cfs_spin_lock(&d->opd_pre_lock);
+       spin_lock(&d->opd_pre_lock);
        sfs->os_fprecreated = d->opd_pre_last_created - d->opd_pre_used_id;
        sfs->os_fprecreated -= d->opd_pre_reserved;
-       cfs_spin_unlock(&d->opd_pre_lock);
+       spin_unlock(&d->opd_pre_lock);
 
        LASSERT(sfs->os_fprecreated <= OST_MAX_PRECREATE * 2);
 
@@ -750,10 +750,10 @@ static int osp_obd_statfs(const struct lu_env *env, struct obd_export *exp,
 
        /* Since the request might also come from lprocfs, we need to
         * sync this with client_disconnect_export (Bug 15684) */
-       cfs_down_read(&exp->exp_obd->u.cli.cl_sem);
+       down_read(&exp->exp_obd->u.cli.cl_sem);
        if (exp->exp_obd->u.cli.cl_import)
                imp = class_import_get(exp->exp_obd->u.cli.cl_import);
-       cfs_up_read(&exp->exp_obd->u.cli.cl_sem);
+       up_read(&exp->exp_obd->u.cli.cl_sem);
        if (!imp)
                RETURN(-ENODEV);
 
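
The cl_sem hunk above is the usual pin-then-drop idiom: the rw_semaphore only
protects the pointer dereference, a reference is taken under the lock, and the
import is used after up_read(). A condensed sketch with illustrative names (the
real code pins the import with class_import_get() rather than a bare kref):

#include <linux/rwsem.h>
#include <linux/kref.h>

struct import_demo {
	struct kref	ref;
};

struct client_demo {
	struct rw_semaphore	sem;	/* protects the imp pointer */
	struct import_demo	*imp;
};

static struct import_demo *client_demo_import_get(struct client_demo *cli)
{
	struct import_demo *imp = NULL;

	down_read(&cli->sem);			/* was cfs_down_read() */
	if (cli->imp) {
		kref_get(&cli->imp->ref);	/* pin before dropping the lock */
		imp = cli->imp;
	}
	up_read(&cli->sem);			/* was cfs_up_read() */
	return imp;				/* caller must drop the ref */
}
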
index 900687d..ae5cfe0 100644 (file)
@@ -50,7 +50,7 @@
  * Infrastructure to support tracking of last committed llog record
  */
 struct osp_id_tracker {
-       cfs_spinlock_t           otr_lock;
+       spinlock_t               otr_lock;
        __u32                    otr_next_id;
        __u32                    otr_committed_id;
        /* callback is registered once per diskfs -- that's the whole point */
@@ -102,7 +102,7 @@ struct osp_device {
        /*
         * Precreation pool
         */
-       cfs_spinlock_t                   opd_pre_lock;
+       spinlock_t                       opd_pre_lock;
        /* last id assigned in creation */
        __u64                            opd_pre_used_id;
        /* last created id the OST reported; next-created - available ids */
@@ -127,7 +127,7 @@ struct osp_device {
        /*
         * OST synchronization
         */
-       cfs_spinlock_t                   opd_syn_lock;
+       spinlock_t                       opd_syn_lock;
        /* unique generation, to recognize start of new records in the llog */
        struct llog_gen                  opd_syn_generation;
        /* number of changes to sync, used to wake up sync thread */
index d94b5b9..aef4b87 100644 (file)
@@ -220,7 +220,7 @@ static int osp_declare_object_create(const struct lu_env *env,
                                             th);
        } else {
                /* not needed in the cache anymore */
-               cfs_set_bit(LU_OBJECT_HEARD_BANSHEE,
+               set_bit(LU_OBJECT_HEARD_BANSHEE,
                            &dt->do_lu.lo_header->loh_flags);
        }
        RETURN(rc);
@@ -247,9 +247,9 @@ static int osp_object_create(const struct lu_env *env, struct dt_object *dt,
                rc = fid_ostid_pack(lu_object_fid(&dt->do_lu), &osi->osi_oi);
                LASSERT(rc == 0);
                osi->osi_id = ostid_id(&osi->osi_oi);
-               cfs_spin_lock(&d->opd_pre_lock);
+               spin_lock(&d->opd_pre_lock);
                osp_update_last_id(d, osi->osi_id);
-               cfs_spin_unlock(&d->opd_pre_lock);
+               spin_unlock(&d->opd_pre_lock);
        }
 
        LASSERT(osi->osi_id);
@@ -263,20 +263,20 @@ static int osp_object_create(const struct lu_env *env, struct dt_object *dt,
 
        /* we might have lost precreated objects */
        if (unlikely(d->opd_gap_count) > 0) {
-               cfs_spin_lock(&d->opd_pre_lock);
+               spin_lock(&d->opd_pre_lock);
                if (d->opd_gap_count > 0) {
                        int count = d->opd_gap_count;
 
                        osi->osi_oi.oi_id = d->opd_gap_start;
                        d->opd_gap_count = 0;
-                       cfs_spin_unlock(&d->opd_pre_lock);
+                       spin_unlock(&d->opd_pre_lock);
 
                        CDEBUG(D_HA, "Found gap "LPU64"+%d in objids\n",
                               d->opd_gap_start, count);
                        /* real gap handling is disabled until ORI-692 is
                         * fixed; for now we only report gaps */
                } else {
-                       cfs_spin_unlock(&d->opd_pre_lock);
+                       spin_unlock(&d->opd_pre_lock);
                }
        }
 
@@ -325,7 +325,7 @@ static int osp_object_destroy(const struct lu_env *env, struct dt_object *dt,
        rc = osp_sync_add(env, o, MDS_UNLINK64_REC, th, NULL);
 
        /* not needed in cache any more */
-       cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
+       set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
 
        RETURN(rc);
 }
@@ -372,12 +372,12 @@ static void osp_object_release(const struct lu_env *env, struct lu_object *o)
         */
        if (unlikely(po->opo_reserved)) {
                LASSERT(d->opd_pre_reserved > 0);
-               cfs_spin_lock(&d->opd_pre_lock);
+               spin_lock(&d->opd_pre_lock);
                d->opd_pre_reserved--;
-               cfs_spin_unlock(&d->opd_pre_lock);
+               spin_unlock(&d->opd_pre_lock);
 
                /* not needed in cache any more */
-               cfs_set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
+               set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
        }
        EXIT;
 }
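
cfs_set_bit() above becomes the kernel's atomic set_bit(), which takes a bit
number (not a mask) and a pointer to an unsigned long bitmap such as loh_flags.
A small sketch of the idiom; the flag name is illustrative:

#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_FLAG_DYING	0	/* a bit number, not a mask */

static void demo_mark_dying(unsigned long *flags)
{
	set_bit(DEMO_FLAG_DYING, flags);	/* atomic read-modify-write */
}

static bool demo_is_dying(unsigned long *flags)
{
	return test_bit(DEMO_FLAG_DYING, flags);
}

/* usage: unsigned long flags = 0; demo_mark_dying(&flags); */
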
index 82b822d..7d13618 100644 (file)
@@ -212,9 +212,9 @@ static inline int osp_precreate_near_empty(struct osp_device *d)
        int rc;
 
        /* XXX: do we really need locking here? */
-       cfs_spin_lock(&d->opd_pre_lock);
+       spin_lock(&d->opd_pre_lock);
        rc = osp_precreate_near_empty_nolock(d);
-       cfs_spin_unlock(&d->opd_pre_lock);
+       spin_unlock(&d->opd_pre_lock);
        return rc;
 }
 
@@ -254,11 +254,11 @@ static int osp_precreate_send(struct osp_device *d)
                RETURN(rc);
        }
 
-       cfs_spin_lock(&d->opd_pre_lock);
+       spin_lock(&d->opd_pre_lock);
        if (d->opd_pre_grow_count > d->opd_pre_max_grow_count / 2)
                d->opd_pre_grow_count = d->opd_pre_max_grow_count / 2;
        grow = d->opd_pre_grow_count;
-       cfs_spin_unlock(&d->opd_pre_lock);
+       spin_unlock(&d->opd_pre_lock);
 
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
@@ -286,7 +286,7 @@ static int osp_precreate_send(struct osp_device *d)
 
        diff = body->oa.o_id - d->opd_pre_last_created;
 
-       cfs_spin_lock(&d->opd_pre_lock);
+       spin_lock(&d->opd_pre_lock);
        if (diff < grow) {
                /* the OST has not managed to create all the
                 * objects we asked for */
@@ -299,7 +299,7 @@ static int osp_precreate_send(struct osp_device *d)
                d->opd_pre_grow_slow = 0;
        }
        d->opd_pre_last_created = body->oa.o_id;
-       cfs_spin_unlock(&d->opd_pre_lock);
+       spin_unlock(&d->opd_pre_lock);
        CDEBUG(D_OTHER, "current precreated pool: %llu-%llu\n",
               d->opd_pre_used_id, d->opd_pre_last_created);
 
@@ -427,7 +427,7 @@ static int osp_precreate_cleanup_orphans(struct osp_device *d)
        /*
         * OST provides us with the id the new pool starts from in body->oa.o_id
         */
-       cfs_spin_lock(&d->opd_pre_lock);
+       spin_lock(&d->opd_pre_lock);
        if (le64_to_cpu(d->opd_last_used_id) > body->oa.o_id) {
                d->opd_pre_grow_count = OST_MIN_PRECREATE +
                                        le64_to_cpu(d->opd_last_used_id) -
@@ -439,7 +439,7 @@ static int osp_precreate_cleanup_orphans(struct osp_device *d)
        }
        d->opd_pre_used_id = d->opd_pre_last_created;
        d->opd_pre_grow_slow = 0;
-       cfs_spin_unlock(&d->opd_pre_lock);
+       spin_unlock(&d->opd_pre_lock);
 
        CDEBUG(D_HA, "%s: Got last_id "LPU64" from OST, last_used is "LPU64
               ", pre_used "LPU64"\n", d->opd_obd->obd_name, body->oa.o_id,
@@ -529,9 +529,9 @@ static int osp_precreate_thread(void *_arg)
        sprintf(pname, "osp-pre-%u\n", d->opd_index);
        cfs_daemonize(pname);
 
-       cfs_spin_lock(&d->opd_pre_lock);
+       spin_lock(&d->opd_pre_lock);
        thread->t_flags = SVC_RUNNING;
-       cfs_spin_unlock(&d->opd_pre_lock);
+       spin_unlock(&d->opd_pre_lock);
        cfs_waitq_signal(&thread->t_ctl_waitq);
 
        while (osp_precreate_running(d)) {
@@ -716,17 +716,17 @@ int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d)
                    d->opd_pre_grow_slow == 0 &&
                    (d->opd_pre_last_created - d->opd_pre_used_id <=
                     d->opd_pre_grow_count / 4 + 1)) {
-                       cfs_spin_lock(&d->opd_pre_lock);
+                       spin_lock(&d->opd_pre_lock);
                        d->opd_pre_grow_slow = 1;
                        d->opd_pre_grow_count *= 2;
-                       cfs_spin_unlock(&d->opd_pre_lock);
+                       spin_unlock(&d->opd_pre_lock);
                }
 
-               cfs_spin_lock(&d->opd_pre_lock);
+               spin_lock(&d->opd_pre_lock);
                precreated = d->opd_pre_last_created - d->opd_pre_used_id;
                if (precreated > d->opd_pre_reserved) {
                        d->opd_pre_reserved++;
-                       cfs_spin_unlock(&d->opd_pre_lock);
+                       spin_unlock(&d->opd_pre_lock);
                        rc = 0;
 
                        /* XXX: don't wake up if precreation is in progress */
@@ -735,7 +735,7 @@ int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d)
 
                        break;
                }
-               cfs_spin_unlock(&d->opd_pre_lock);
+               spin_unlock(&d->opd_pre_lock);
 
                /*
                 * all precreated objects have been used and no-space
@@ -777,7 +777,7 @@ __u64 osp_precreate_get_id(struct osp_device *d)
        obd_id objid;
 
        /* grab next id from the pool */
-       cfs_spin_lock(&d->opd_pre_lock);
+       spin_lock(&d->opd_pre_lock);
        LASSERT(d->opd_pre_used_id < d->opd_pre_last_created);
        objid = ++d->opd_pre_used_id;
        d->opd_pre_reserved--;
@@ -786,7 +786,7 @@ __u64 osp_precreate_get_id(struct osp_device *d)
         * we might miscalculate the gap, causing object loss or a leak
         */
        osp_update_last_id(d, objid);
-       cfs_spin_unlock(&d->opd_pre_lock);
+       spin_unlock(&d->opd_pre_lock);
 
        /*
         * probably main thread suspended orphan cleanup till
@@ -887,7 +887,7 @@ int osp_init_precreate(struct osp_device *d)
        d->opd_pre_min_grow_count = OST_MIN_PRECREATE;
        d->opd_pre_max_grow_count = OST_MAX_PRECREATE;
 
-       cfs_spin_lock_init(&d->opd_pre_lock);
+       spin_lock_init(&d->opd_pre_lock);
        cfs_waitq_init(&d->opd_pre_waitq);
        cfs_waitq_init(&d->opd_pre_user_waitq);
        cfs_waitq_init(&d->opd_pre_thread.t_ctl_waitq);
index 4df325a..2728d4b 100644 (file)
@@ -268,9 +268,9 @@ static int osp_sync_add_rec(const struct lu_env *env, struct osp_device *d,
                rc = 0;
 
        if (likely(rc == 0)) {
-               cfs_spin_lock(&d->opd_syn_lock);
+               spin_lock(&d->opd_syn_lock);
                d->opd_syn_changes++;
-               cfs_spin_unlock(&d->opd_syn_lock);
+               spin_unlock(&d->opd_syn_lock);
        }
 
        RETURN(rc);
@@ -327,9 +327,9 @@ static void osp_sync_request_commit_cb(struct ptlrpc_request *req)
                /* this request was aborted by the shutdown procedure,
                 * not committed by the peer.  we should preserve llog
                 * record */
-               cfs_spin_lock(&d->opd_syn_lock);
+               spin_lock(&d->opd_syn_lock);
                d->opd_syn_rpc_in_progress--;
-               cfs_spin_unlock(&d->opd_syn_lock);
+               spin_unlock(&d->opd_syn_lock);
                cfs_waitq_signal(&d->opd_syn_waitq);
                return;
        }
@@ -341,9 +341,9 @@ static void osp_sync_request_commit_cb(struct ptlrpc_request *req)
 
        ptlrpc_request_addref(req);
 
-       cfs_spin_lock(&d->opd_syn_lock);
+       spin_lock(&d->opd_syn_lock);
        cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
-       cfs_spin_unlock(&d->opd_syn_lock);
+       spin_unlock(&d->opd_syn_lock);
 
        /* XXX: some batching wouldn't hurt */
        cfs_waitq_signal(&d->opd_syn_waitq);
@@ -375,9 +375,9 @@ static int osp_sync_interpret(const struct lu_env *env,
 
                ptlrpc_request_addref(req);
 
-               cfs_spin_lock(&d->opd_syn_lock);
+               spin_lock(&d->opd_syn_lock);
                cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
-               cfs_spin_unlock(&d->opd_syn_lock);
+               spin_unlock(&d->opd_syn_lock);
 
                cfs_waitq_signal(&d->opd_syn_waitq);
        } else if (rc) {
@@ -395,9 +395,9 @@ static int osp_sync_interpret(const struct lu_env *env,
                        /* this is the last time we see the request
                         * if transno is not zero, then commit cb
                         * will be called at some point */
-                       cfs_spin_lock(&d->opd_syn_lock);
+                       spin_lock(&d->opd_syn_lock);
                        d->opd_syn_rpc_in_progress--;
-                       cfs_spin_unlock(&d->opd_syn_lock);
+                       spin_unlock(&d->opd_syn_lock);
                }
 
                cfs_waitq_signal(&d->opd_syn_waitq);
@@ -411,9 +411,9 @@ static int osp_sync_interpret(const struct lu_env *env,
        }
 
        LASSERT(d->opd_syn_rpc_in_flight > 0);
-       cfs_spin_lock(&d->opd_syn_lock);
+       spin_lock(&d->opd_syn_lock);
        d->opd_syn_rpc_in_flight--;
-       cfs_spin_unlock(&d->opd_syn_lock);
+       spin_unlock(&d->opd_syn_lock);
        CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
               d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
               d->opd_syn_rpc_in_progress);
@@ -606,10 +606,10 @@ static int osp_sync_process_record(const struct lu_env *env,
 
        /* note that we increment counters before sending the RPC so that the
         * RPC interpret callback, which may run very quickly, stays consistent */
-       cfs_spin_lock(&d->opd_syn_lock);
+       spin_lock(&d->opd_syn_lock);
        d->opd_syn_rpc_in_flight++;
        d->opd_syn_rpc_in_progress++;
-       cfs_spin_unlock(&d->opd_syn_lock);
+       spin_unlock(&d->opd_syn_lock);
 
        switch (rec->lrh_type) {
        /* case MDS_UNLINK_REC is kept for compatibility */
@@ -629,7 +629,7 @@ static int osp_sync_process_record(const struct lu_env *env,
        }
 
        if (likely(rc == 0)) {
-               cfs_spin_lock(&d->opd_syn_lock);
+               spin_lock(&d->opd_syn_lock);
                if (d->opd_syn_prev_done) {
                        LASSERT(d->opd_syn_changes > 0);
                        LASSERT(rec->lrh_id <= d->opd_syn_last_committed_id);
@@ -645,12 +645,12 @@ static int osp_sync_process_record(const struct lu_env *env,
                CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
                       d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
                       d->opd_syn_rpc_in_progress);
-               cfs_spin_unlock(&d->opd_syn_lock);
+               spin_unlock(&d->opd_syn_lock);
        } else {
-               cfs_spin_lock(&d->opd_syn_lock);
+               spin_lock(&d->opd_syn_lock);
                d->opd_syn_rpc_in_flight--;
                d->opd_syn_rpc_in_progress--;
-               cfs_spin_unlock(&d->opd_syn_lock);
+               spin_unlock(&d->opd_syn_lock);
        }
 
        CDEBUG(D_HA, "found record %x, %d, idx %u, id %u: %d\n",
@@ -698,10 +698,10 @@ static void osp_sync_process_committed(const struct lu_env *env,
        LASSERT(llh);
 
        CFS_INIT_LIST_HEAD(&list);
-       cfs_spin_lock(&d->opd_syn_lock);
+       spin_lock(&d->opd_syn_lock);
        cfs_list_splice(&d->opd_syn_committed_there, &list);
        CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there);
-       cfs_spin_unlock(&d->opd_syn_lock);
+       spin_unlock(&d->opd_syn_lock);
 
        cfs_list_for_each_entry_safe(req, tmp, &list, rq_exp_list) {
                LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
@@ -729,9 +729,9 @@ static void osp_sync_process_committed(const struct lu_env *env,
        llog_ctxt_put(ctxt);
 
        LASSERT(d->opd_syn_rpc_in_progress >= done);
-       cfs_spin_lock(&d->opd_syn_lock);
+       spin_lock(&d->opd_syn_lock);
        d->opd_syn_rpc_in_progress -= done;
-       cfs_spin_unlock(&d->opd_syn_lock);
+       spin_unlock(&d->opd_syn_lock);
        CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
               d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
               d->opd_syn_rpc_in_progress);
@@ -847,9 +847,9 @@ static int osp_sync_thread(void *_arg)
        sprintf(pname, "osp-syn-%u\n", d->opd_index);
        cfs_daemonize(pname);
 
-       cfs_spin_lock(&d->opd_syn_lock);
+       spin_lock(&d->opd_syn_lock);
        thread->t_flags = SVC_RUNNING;
-       cfs_spin_unlock(&d->opd_syn_lock);
+       spin_unlock(&d->opd_syn_lock);
        cfs_waitq_signal(&thread->t_ctl_waitq);
 
        ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
@@ -1040,7 +1040,7 @@ int osp_sync_init(const struct lu_env *env, struct osp_device *d)
         */
        d->opd_syn_max_rpc_in_flight = OSP_MAX_IN_FLIGHT;
        d->opd_syn_max_rpc_in_progress = OSP_MAX_IN_PROGRESS;
-       cfs_spin_lock_init(&d->opd_syn_lock);
+       spin_lock_init(&d->opd_syn_lock);
        cfs_waitq_init(&d->opd_syn_waitq);
        cfs_waitq_init(&d->opd_syn_thread.t_ctl_waitq);
        CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there);
@@ -1082,7 +1082,7 @@ int osp_sync_fini(struct osp_device *d)
        RETURN(0);
 }
 
-static CFS_DEFINE_MUTEX(osp_id_tracker_sem);
+static DEFINE_MUTEX(osp_id_tracker_sem);
 static CFS_LIST_HEAD(osp_id_tracker_list);
 
 static void osp_sync_tracker_commit_cb(struct thandle *th, void *cookie)
@@ -1097,7 +1097,7 @@ static void osp_sync_tracker_commit_cb(struct thandle *th, void *cookie)
        if (txn == NULL || txn->oti_current_id < tr->otr_committed_id)
                return;
 
-       cfs_spin_lock(&tr->otr_lock);
+       spin_lock(&tr->otr_lock);
        if (likely(txn->oti_current_id > tr->otr_committed_id)) {
                CDEBUG(D_OTHER, "committed: %u -> %u\n",
                       tr->otr_committed_id, txn->oti_current_id);
@@ -1109,7 +1109,7 @@ static void osp_sync_tracker_commit_cb(struct thandle *th, void *cookie)
                        cfs_waitq_signal(&d->opd_syn_waitq);
                }
        }
-       cfs_spin_unlock(&tr->otr_lock);
+       spin_unlock(&tr->otr_lock);
 }
 
 static int osp_sync_id_traction_init(struct osp_device *d)
@@ -1122,7 +1122,7 @@ static int osp_sync_id_traction_init(struct osp_device *d)
        LASSERT(d->opd_syn_tracker == NULL);
        CFS_INIT_LIST_HEAD(&d->opd_syn_ontrack);
 
-       cfs_mutex_lock(&osp_id_tracker_sem);
+       mutex_lock(&osp_id_tracker_sem);
        cfs_list_for_each_entry(tr, &osp_id_tracker_list, otr_list) {
                if (tr->otr_dev == d->opd_storage) {
                        LASSERT(cfs_atomic_read(&tr->otr_refcount));
@@ -1138,7 +1138,7 @@ static int osp_sync_id_traction_init(struct osp_device *d)
                OBD_ALLOC_PTR(tr);
                if (tr) {
                        d->opd_syn_tracker = tr;
-                       cfs_spin_lock_init(&tr->otr_lock);
+                       spin_lock_init(&tr->otr_lock);
                        tr->otr_dev = d->opd_storage;
                        tr->otr_next_id = 1;
                        tr->otr_committed_id = 0;
@@ -1153,7 +1153,7 @@ static int osp_sync_id_traction_init(struct osp_device *d)
                        rc = 0;
                }
        }
-       cfs_mutex_unlock(&osp_id_tracker_sem);
+       mutex_unlock(&osp_id_tracker_sem);
 
        return rc;
 }
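
osp_sync_id_traction_init() above is a find-or-create under a statically
defined mutex: DEFINE_MUTEX() replaces CFS_DEFINE_MUTEX(), and one mutex
serializes both the list walk and the allocation. Because a mutex, unlike a
spinlock, may be held across sleeping calls, the allocation can stay inside the
critical section. A condensed sketch, with illustrative names and the error
handling elided:

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/slab.h>

struct tracker_demo {
	struct list_head	list;
	void			*dev;
	int			refcount;
};

static DEFINE_MUTEX(tracker_mutex);	/* was CFS_DEFINE_MUTEX() */
static LIST_HEAD(tracker_list);

static struct tracker_demo *tracker_demo_get(void *dev)
{
	struct tracker_demo *tr;

	mutex_lock(&tracker_mutex);
	list_for_each_entry(tr, &tracker_list, list) {
		if (tr->dev == dev) {
			tr->refcount++;		/* found an existing tracker */
			goto out;
		}
	}
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);	/* sleeping is fine here */
	if (tr) {
		tr->dev = dev;
		tr->refcount = 1;
		list_add(&tr->list, &tracker_list);
	}
out:
	mutex_unlock(&tracker_mutex);
	return tr;
}
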
@@ -1173,7 +1173,7 @@ static void osp_sync_id_traction_fini(struct osp_device *d)
 
        osp_sync_remove_from_tracker(d);
 
-       cfs_mutex_lock(&osp_id_tracker_sem);
+       mutex_lock(&osp_id_tracker_sem);
        if (cfs_atomic_dec_and_test(&tr->otr_refcount)) {
                dt_txn_callback_del(d->opd_storage, &tr->otr_tx_cb);
                LASSERT(cfs_list_empty(&tr->otr_wakeup_list));
@@ -1181,7 +1181,7 @@ static void osp_sync_id_traction_fini(struct osp_device *d)
                OBD_FREE_PTR(tr);
                d->opd_syn_tracker = NULL;
        }
-       cfs_mutex_unlock(&osp_id_tracker_sem);
+       mutex_unlock(&osp_id_tracker_sem);
 
        EXIT;
 }
@@ -1197,9 +1197,9 @@ static __u32 osp_sync_id_get(struct osp_device *d, __u32 id)
        LASSERT(tr);
 
        /* XXX: we could improve this by introducing per-cpu preallocated ids */
-       cfs_spin_lock(&tr->otr_lock);
+       spin_lock(&tr->otr_lock);
        if (unlikely(tr->otr_next_id <= d->opd_syn_last_used_id)) {
-               cfs_spin_unlock(&tr->otr_lock);
+               spin_unlock(&tr->otr_lock);
                CERROR("%s: next %u, last synced %lu\n",
                       d->opd_obd->obd_name, tr->otr_next_id,
                       d->opd_syn_last_used_id);
@@ -1212,7 +1212,7 @@ static __u32 osp_sync_id_get(struct osp_device *d, __u32 id)
                d->opd_syn_last_used_id = id;
        if (cfs_list_empty(&d->opd_syn_ontrack))
                cfs_list_add(&d->opd_syn_ontrack, &tr->otr_wakeup_list);
-       cfs_spin_unlock(&tr->otr_lock);
+       spin_unlock(&tr->otr_lock);
        CDEBUG(D_OTHER, "new id %u\n", (unsigned) id);
 
        return id;
@@ -1228,8 +1228,8 @@ static void osp_sync_remove_from_tracker(struct osp_device *d)
        if (cfs_list_empty(&d->opd_syn_ontrack))
                return;
 
-       cfs_spin_lock(&tr->otr_lock);
+       spin_lock(&tr->otr_lock);
        cfs_list_del_init(&d->opd_syn_ontrack);
-       cfs_spin_unlock(&tr->otr_lock);
+       spin_unlock(&tr->otr_lock);
 }
 
index f84484e..b560279 100644 (file)
@@ -1383,14 +1383,14 @@ static int ost_llog_handle_connect(struct obd_export *exp,
         RETURN(rc);
 }
 
-#define ost_init_sec_none(reply, exp)                                   \
-do {                                                                    \
-        reply->ocd_connect_flags &= ~(OBD_CONNECT_RMT_CLIENT |          \
-                                      OBD_CONNECT_RMT_CLIENT_FORCE |    \
-                                      OBD_CONNECT_OSS_CAPA);            \
-        cfs_spin_lock(&exp->exp_lock);                                  \
-        exp->exp_connect_flags = reply->ocd_connect_flags;              \
-        cfs_spin_unlock(&exp->exp_lock);                                \
+#define ost_init_sec_none(reply, exp)                                  \
+do {                                                                   \
+       reply->ocd_connect_flags &= ~(OBD_CONNECT_RMT_CLIENT |          \
+                                     OBD_CONNECT_RMT_CLIENT_FORCE |    \
+                                     OBD_CONNECT_OSS_CAPA);            \
+       spin_lock(&exp->exp_lock);                                      \
+       exp->exp_connect_flags = reply->ocd_connect_flags;              \
+       spin_unlock(&exp->exp_lock);                                    \
 } while (0)
 
 static int ost_init_sec_level(struct ptlrpc_request *req)
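
The ost_init_sec_none() rewrite above is purely an indentation and API change,
but it is a reminder of why multi-statement locking macros are wrapped in
do { ... } while (0): the body expands as a single statement, so the
spin_lock()/spin_unlock() pair cannot be torn apart by an if/else around the
macro. A hedged illustration with made-up names:

#include <linux/spinlock.h>

struct export_demo {
	spinlock_t	lock;
	unsigned long	flags;
};

#define demo_set_flags(exp, new_flags)		\
do {						\
	spin_lock(&(exp)->lock);		\
	(exp)->flags = (new_flags);		\
	spin_unlock(&(exp)->lock);		\
} while (0)

/*
 * Expands safely even as the sole statement of an if:
 *
 *	if (cond)
 *		demo_set_flags(exp, 0);
 *	else
 *		do_something_else();
 */
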
@@ -1487,9 +1487,9 @@ static int ost_init_sec_level(struct ptlrpc_request *req)
                         if (!filter->fo_fl_oss_capa)
                                 reply->ocd_connect_flags &= ~OBD_CONNECT_OSS_CAPA;
 
-                        cfs_spin_lock(&exp->exp_lock);
-                        exp->exp_connect_flags = reply->ocd_connect_flags;
-                        cfs_spin_unlock(&exp->exp_lock);
+                       spin_lock(&exp->exp_lock);
+                       exp->exp_connect_flags = reply->ocd_connect_flags;
+                       spin_unlock(&exp->exp_lock);
                 }
                 break;
         default:
@@ -1522,14 +1522,14 @@ static int ost_connect_check_sptlrpc(struct ptlrpc_request *req)
         }
 
         if (exp->exp_flvr.sf_rpc == SPTLRPC_FLVR_INVALID) {
-                cfs_read_lock(&filter->fo_sptlrpc_lock);
-                sptlrpc_target_choose_flavor(&filter->fo_sptlrpc_rset,
-                                             req->rq_sp_from,
-                                             req->rq_peer.nid,
-                                             &flvr);
-                cfs_read_unlock(&filter->fo_sptlrpc_lock);
+               read_lock(&filter->fo_sptlrpc_lock);
+               sptlrpc_target_choose_flavor(&filter->fo_sptlrpc_rset,
+                                            req->rq_sp_from,
+                                            req->rq_peer.nid,
+                                            &flvr);
+               read_unlock(&filter->fo_sptlrpc_lock);
 
-                cfs_spin_lock(&exp->exp_lock);
+               spin_lock(&exp->exp_lock);
 
                 exp->exp_sp_peer = req->rq_sp_from;
                 exp->exp_flvr = flvr;
@@ -1543,7 +1543,7 @@ static int ost_connect_check_sptlrpc(struct ptlrpc_request *req)
                         rc = -EACCES;
                 }
 
-                cfs_spin_unlock(&exp->exp_lock);
+               spin_unlock(&exp->exp_lock);
         } else {
                 if (exp->exp_sp_peer != req->rq_sp_from) {
                         CERROR("RPC source %s doesn't match %s\n",
@@ -1792,7 +1792,7 @@ static void ost_prolong_locks(struct ost_prolong_data *data)
         }
 
 
-        cfs_spin_lock_bh(&exp->exp_bl_list_lock);
+       spin_lock_bh(&exp->exp_bl_list_lock);
         cfs_list_for_each_entry(lock, &exp->exp_bl_list, l_exp_list) {
                 LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
                 LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
@@ -1806,9 +1806,9 @@ static void ost_prolong_locks(struct ost_prolong_data *data)
 
                 ost_prolong_lock_one(data, lock);
         }
-        cfs_spin_unlock_bh(&exp->exp_bl_list_lock);
+       spin_unlock_bh(&exp->exp_bl_list_lock);
 
-        EXIT;
+       EXIT;
 }
 
 /**
@@ -2456,7 +2456,7 @@ static int ost_setup(struct obd_device *obd, struct lustre_cfg* lcfg)
         lprocfs_ost_init_vars(&lvars);
         lprocfs_obd_setup(obd, lvars.obd_vars);
 
-        cfs_mutex_init(&ost->ost_health_mutex);
+       mutex_init(&ost->ost_health_mutex);
 
        svc_conf = (typeof(svc_conf)) {
                .psc_name               = LUSTRE_OSS_NAME,
@@ -2629,7 +2629,7 @@ static int ost_cleanup(struct obd_device *obd)
         /* there is no recovery for OST OBD, all recovery is controlled by
          * obdfilter OBD */
         LASSERT(obd->obd_recovering == 0);
-        cfs_mutex_lock(&ost->ost_health_mutex);
+       mutex_lock(&ost->ost_health_mutex);
         ptlrpc_unregister_service(ost->ost_service);
         ptlrpc_unregister_service(ost->ost_create_service);
         ptlrpc_unregister_service(ost->ost_io_service);
@@ -2637,7 +2637,7 @@ static int ost_cleanup(struct obd_device *obd)
         ost->ost_create_service = NULL;
        ost->ost_io_service = NULL;
 
-       cfs_mutex_unlock(&ost->ost_health_mutex);
+       mutex_unlock(&ost->ost_health_mutex);
 
        lprocfs_obd_cleanup(obd);
 
@@ -2654,11 +2654,11 @@ static int ost_health_check(const struct lu_env *env, struct obd_device *obd)
         struct ost_obd *ost = &obd->u.ost;
         int rc = 0;
 
-        cfs_mutex_lock(&ost->ost_health_mutex);
+       mutex_lock(&ost->ost_health_mutex);
         rc |= ptlrpc_service_health_check(ost->ost_service);
         rc |= ptlrpc_service_health_check(ost->ost_create_service);
         rc |= ptlrpc_service_health_check(ost->ost_io_service);
-        cfs_mutex_unlock(&ost->ost_health_mutex);
+       mutex_unlock(&ost->ost_health_mutex);
 
         /*
          * health_check to return 0 on healthy
index 4150a78..eea2337 100644 (file)
@@ -109,7 +109,7 @@ struct ptlrpc_bulk_desc *new_bulk(int npages, int type, int portal)
         if (!desc)
                 return NULL;
 
-        cfs_spin_lock_init(&desc->bd_lock);
+       spin_lock_init(&desc->bd_lock);
         cfs_waitq_init(&desc->bd_waitq);
         desc->bd_max_iov = npages;
         desc->bd_iov_count = 0;
@@ -335,11 +335,11 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
         ENTRY;
 
         req->rq_early = 0;
-        cfs_spin_unlock(&req->rq_lock);
+       spin_unlock(&req->rq_lock);
 
-        rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
-        if (rc) {
-                cfs_spin_lock(&req->rq_lock);
+       rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
+       if (rc) {
+               spin_lock(&req->rq_lock);
                 RETURN(rc);
         }
 
@@ -354,7 +354,7 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
 
         sptlrpc_cli_finish_early_reply(early_req);
 
-        cfs_spin_lock(&req->rq_lock);
+       spin_lock(&req->rq_lock);
 
         if (rc == 0) {
                 /* Adjust the local timeout for this req */
@@ -383,22 +383,22 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
  */
 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
 {
-        cfs_list_t *l, *tmp;
-        struct ptlrpc_request *req;
+       cfs_list_t *l, *tmp;
+       struct ptlrpc_request *req;
 
-        LASSERT(pool != NULL);
+       LASSERT(pool != NULL);
 
-        cfs_spin_lock(&pool->prp_lock);
-        cfs_list_for_each_safe(l, tmp, &pool->prp_req_list) {
-                req = cfs_list_entry(l, struct ptlrpc_request, rq_list);
-                cfs_list_del(&req->rq_list);
-                LASSERT(req->rq_reqbuf);
-                LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
-                OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
-                OBD_FREE(req, sizeof(*req));
-        }
-        cfs_spin_unlock(&pool->prp_lock);
-        OBD_FREE(pool, sizeof(*pool));
+       spin_lock(&pool->prp_lock);
+       cfs_list_for_each_safe(l, tmp, &pool->prp_req_list) {
+               req = cfs_list_entry(l, struct ptlrpc_request, rq_list);
+               cfs_list_del(&req->rq_list);
+               LASSERT(req->rq_reqbuf);
+               LASSERT(req->rq_reqbuf_len == pool->prp_rq_size);
+               OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size);
+               OBD_FREE(req, sizeof(*req));
+       }
+       spin_unlock(&pool->prp_lock);
+       OBD_FREE(pool, sizeof(*pool));
 }
 EXPORT_SYMBOL(ptlrpc_free_rq_pool);
 
@@ -418,13 +418,13 @@ void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
                  "Trying to change pool size with nonempty pool "
                  "from %d to %d bytes\n", pool->prp_rq_size, size);
 
-        cfs_spin_lock(&pool->prp_lock);
-        pool->prp_rq_size = size;
-        for (i = 0; i < num_rq; i++) {
-                struct ptlrpc_request *req;
-                struct lustre_msg *msg;
+       spin_lock(&pool->prp_lock);
+       pool->prp_rq_size = size;
+       for (i = 0; i < num_rq; i++) {
+               struct ptlrpc_request *req;
+               struct lustre_msg *msg;
 
-                cfs_spin_unlock(&pool->prp_lock);
+               spin_unlock(&pool->prp_lock);
                 OBD_ALLOC(req, sizeof(struct ptlrpc_request));
                 if (!req)
                         return;
@@ -436,11 +436,11 @@ void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
                 req->rq_reqbuf = msg;
                 req->rq_reqbuf_len = size;
                 req->rq_pool = pool;
-                cfs_spin_lock(&pool->prp_lock);
-                cfs_list_add_tail(&req->rq_list, &pool->prp_req_list);
-        }
-        cfs_spin_unlock(&pool->prp_lock);
-        return;
+               spin_lock(&pool->prp_lock);
+               cfs_list_add_tail(&req->rq_list, &pool->prp_req_list);
+       }
+       spin_unlock(&pool->prp_lock);
+       return;
 }
 EXPORT_SYMBOL(ptlrpc_add_rqs_to_pool);
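
ptlrpc_add_rqs_to_pool() above keeps a deliberate shape through the reindent:
prp_lock is a spinlock, so the pool must be unlocked across every OBD_ALLOC()
(which may sleep) and re-locked before touching prp_req_list again. A
stripped-down sketch of that unlock-allocate-relock loop (illustrative names,
plain kzalloc standing in for OBD_ALLOC):

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>

struct pool_demo {
	spinlock_t		lock;	/* protects items */
	struct list_head	items;
};

struct item_demo {
	struct list_head	link;
};

static void pool_demo_populate(struct pool_demo *p, int n)
{
	int i;

	spin_lock(&p->lock);
	for (i = 0; i < n; i++) {
		struct item_demo *it;

		spin_unlock(&p->lock);	/* never sleep under a spinlock */
		it = kzalloc(sizeof(*it), GFP_KERNEL);
		if (!it)
			return;		/* a partial fill is acceptable here */
		spin_lock(&p->lock);
		list_add_tail(&it->link, &p->items);
	}
	spin_unlock(&p->lock);
}
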
 
@@ -465,7 +465,7 @@ ptlrpc_init_rq_pool(int num_rq, int msgsize,
         /* Request next power of two for the allocation, because internally
            the kernel would do exactly this */
 
-        cfs_spin_lock_init(&pool->prp_lock);
+       spin_lock_init(&pool->prp_lock);
         CFS_INIT_LIST_HEAD(&pool->prp_req_list);
         pool->prp_rq_size = msgsize + SPTLRPC_MAX_PAYLOAD;
         pool->prp_populate = populate_pool;
@@ -493,21 +493,21 @@ ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
         if (!pool)
                 return NULL;
 
-        cfs_spin_lock(&pool->prp_lock);
+       spin_lock(&pool->prp_lock);
 
-        /* See if we have anything in a pool, and bail out if nothing,
-         * in writeout path, where this matters, this is safe to do, because
-         * nothing is lost in this case, and when some in-flight requests
-         * complete, this code will be called again. */
-        if (unlikely(cfs_list_empty(&pool->prp_req_list))) {
-                cfs_spin_unlock(&pool->prp_lock);
-                return NULL;
-        }
+       /* See if we have anything in a pool, and bail out if nothing.
+        * In the writeout path, where this matters, this is safe to do
+        * because nothing is lost in this case: when some in-flight requests
+        * complete, this code will be called again. */
+       if (unlikely(cfs_list_empty(&pool->prp_req_list))) {
+               spin_unlock(&pool->prp_lock);
+               return NULL;
+       }
 
-        request = cfs_list_entry(pool->prp_req_list.next, struct ptlrpc_request,
-                                 rq_list);
-        cfs_list_del_init(&request->rq_list);
-        cfs_spin_unlock(&pool->prp_lock);
+       request = cfs_list_entry(pool->prp_req_list.next, struct ptlrpc_request,
+                                rq_list);
+       cfs_list_del_init(&request->rq_list);
+       spin_unlock(&pool->prp_lock);
 
         LASSERT(request->rq_reqbuf);
         LASSERT(request->rq_pool);
@@ -526,13 +526,13 @@ ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
  */
 static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
 {
-        struct ptlrpc_request_pool *pool = request->rq_pool;
+       struct ptlrpc_request_pool *pool = request->rq_pool;
 
-        cfs_spin_lock(&pool->prp_lock);
-        LASSERT(cfs_list_empty(&request->rq_list));
-        LASSERT(!request->rq_receiving_reply);
-        cfs_list_add_tail(&request->rq_list, &pool->prp_req_list);
-        cfs_spin_unlock(&pool->prp_lock);
+       spin_lock(&pool->prp_lock);
+       LASSERT(cfs_list_empty(&request->rq_list));
+       LASSERT(!request->rq_receiving_reply);
+       cfs_list_add_tail(&request->rq_list, &pool->prp_req_list);
+       spin_unlock(&pool->prp_lock);
 }
 
 static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
@@ -581,7 +581,7 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
 
         ptlrpc_at_set_req_timeout(request);
 
-        cfs_spin_lock_init(&request->rq_lock);
+       spin_lock_init(&request->rq_lock);
         CFS_INIT_LIST_HEAD(&request->rq_list);
         CFS_INIT_LIST_HEAD(&request->rq_timed_list);
         CFS_INIT_LIST_HEAD(&request->rq_replay_list);
@@ -826,7 +826,7 @@ struct ptlrpc_request_set *ptlrpc_prep_set(void)
        cfs_waitq_init(&set->set_waitq);
        cfs_atomic_set(&set->set_new_count, 0);
        cfs_atomic_set(&set->set_remaining, 0);
-       cfs_spin_lock_init(&set->set_new_req_lock);
+       spin_lock_init(&set->set_new_req_lock);
        CFS_INIT_LIST_HEAD(&set->set_new_requests);
        CFS_INIT_LIST_HEAD(&set->set_cblist);
        set->set_max_inflight = UINT_MAX;
@@ -909,10 +909,10 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
                         cfs_atomic_dec(&set->set_remaining);
                 }
 
-                cfs_spin_lock(&req->rq_lock);
-                req->rq_set = NULL;
-                req->rq_invalid_rqset = 0;
-                cfs_spin_unlock(&req->rq_lock);
+               spin_lock(&req->rq_lock);
+               req->rq_set = NULL;
+               req->rq_invalid_rqset = 0;
+               spin_unlock(&req->rq_lock);
 
                 ptlrpc_req_finished (req);
         }
@@ -983,17 +983,17 @@ void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
         int count, i;
 
         LASSERT(req->rq_set == NULL);
-        LASSERT(cfs_test_bit(LIOD_STOP, &pc->pc_flags) == 0);
+       LASSERT(test_bit(LIOD_STOP, &pc->pc_flags) == 0);
 
-        cfs_spin_lock(&set->set_new_req_lock);
-        /*
-         * The set takes over the caller's request reference.
-         */
-        req->rq_set = set;
-        req->rq_queued_time = cfs_time_current();
-        cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests);
-        count = cfs_atomic_inc_return(&set->set_new_count);
-        cfs_spin_unlock(&set->set_new_req_lock);
+       spin_lock(&set->set_new_req_lock);
+       /*
+        * The set takes over the caller's request reference.
+        */
+       req->rq_set = set;
+       req->rq_queued_time = cfs_time_current();
+       cfs_list_add_tail(&req->rq_set_chain, &set->set_new_requests);
+       count = cfs_atomic_inc_return(&set->set_new_count);
+       spin_unlock(&set->set_new_req_lock);
 
         /* Only need to call wakeup once for the first entry. */
         if (count == 1) {
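
The wakeup above fires only for the first queued entry because
cfs_atomic_inc_return() hands back the post-increment value, so exactly one
producer observes count == 1.  A hedged sketch of the same idiom with plain
kernel atomics (names are illustrative):

    #include <linux/atomic.h>
    #include <linux/wait.h>

    static atomic_t pending = ATOMIC_INIT(0);
    static DECLARE_WAIT_QUEUE_HEAD(consumer_wq);

    static void enqueue_item(void)
    {
            /* ... add the item to a locked list first ... */

            /* Only the producer that moves the counter 0 -> 1 issues the
             * wakeup; later producers know the consumer is already awake. */
            if (atomic_inc_return(&pending) == 1)
                    wake_up(&consumer_wq);
    }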
@@ -1291,7 +1291,7 @@ static int after_reply(struct ptlrpc_request *req)
         }
 
         if (imp->imp_replayable) {
-                cfs_spin_lock(&imp->imp_lock);
+               spin_lock(&imp->imp_lock);
                 /*
                  * No point in adding already-committed requests to the replay
                  * list, we will just remove them immediately. b=9829
@@ -1304,9 +1304,9 @@ static int after_reply(struct ptlrpc_request *req)
                         ptlrpc_save_versions(req);
                         ptlrpc_retain_replayable_request(req, imp);
                 } else if (req->rq_commit_cb != NULL) {
-                        cfs_spin_unlock(&imp->imp_lock);
-                        req->rq_commit_cb(req);
-                        cfs_spin_lock(&imp->imp_lock);
+                       spin_unlock(&imp->imp_lock);
+                       req->rq_commit_cb(req);
+                       spin_lock(&imp->imp_lock);
                 }
 
                 /*
@@ -1321,10 +1321,10 @@ static int after_reply(struct ptlrpc_request *req)
                 if (req->rq_transno > imp->imp_peer_committed_transno)
                         ptlrpc_pinger_commit_expected(imp);
 
-                cfs_spin_unlock(&imp->imp_lock);
-        }
+               spin_unlock(&imp->imp_lock);
+       }
 
-        RETURN(rc);
+       RETURN(rc);
 }
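
Note the idiom preserved around rq_commit_cb above: imp_lock is dropped before
the callback runs and re-taken afterwards, because the callback may take other
locks of its own.  As a generic hedged sketch (hypothetical names):

    #include <linux/spinlock.h>

    /* Run a caller-supplied callback that must not execute under 'lock'.
     * After re-acquiring the lock, the caller must re-validate any state
     * it derived from the previously locked region. */
    static void call_unlocked(spinlock_t *lock, void (*cb)(void *), void *arg)
    {
            spin_unlock(lock);
            cb(arg);                /* may take other locks */
            spin_lock(lock);
    }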
 
 /**
@@ -1346,38 +1346,38 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
 
         ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
 
-        cfs_spin_lock(&imp->imp_lock);
-
-        if (!req->rq_generation_set)
-                req->rq_import_generation = imp->imp_generation;
-
-        if (ptlrpc_import_delay_req(imp, req, &rc)) {
-                cfs_spin_lock(&req->rq_lock);
-                req->rq_waiting = 1;
-                cfs_spin_unlock(&req->rq_lock);
-
-                DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
-                          "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
-                          ptlrpc_import_state_name(req->rq_send_state),
-                          ptlrpc_import_state_name(imp->imp_state));
-                LASSERT(cfs_list_empty(&req->rq_list));
-                cfs_list_add_tail(&req->rq_list, &imp->imp_delayed_list);
-                cfs_atomic_inc(&req->rq_import->imp_inflight);
-                cfs_spin_unlock(&imp->imp_lock);
-                RETURN(0);
-        }
+       spin_lock(&imp->imp_lock);
+
+       if (!req->rq_generation_set)
+               req->rq_import_generation = imp->imp_generation;
+
+       if (ptlrpc_import_delay_req(imp, req, &rc)) {
+               spin_lock(&req->rq_lock);
+               req->rq_waiting = 1;
+               spin_unlock(&req->rq_lock);
+
+               DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
+                         "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
+                         ptlrpc_import_state_name(req->rq_send_state),
+                         ptlrpc_import_state_name(imp->imp_state));
+               LASSERT(cfs_list_empty(&req->rq_list));
+               cfs_list_add_tail(&req->rq_list, &imp->imp_delayed_list);
+               cfs_atomic_inc(&req->rq_import->imp_inflight);
+               spin_unlock(&imp->imp_lock);
+               RETURN(0);
+       }
 
-        if (rc != 0) {
-                cfs_spin_unlock(&imp->imp_lock);
-                req->rq_status = rc;
-                ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
-                RETURN(rc);
-        }
+       if (rc != 0) {
+               spin_unlock(&imp->imp_lock);
+               req->rq_status = rc;
+               ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
+               RETURN(rc);
+       }
 
-        LASSERT(cfs_list_empty(&req->rq_list));
-        cfs_list_add_tail(&req->rq_list, &imp->imp_sending_list);
-        cfs_atomic_inc(&req->rq_import->imp_inflight);
-        cfs_spin_unlock(&imp->imp_lock);
+       LASSERT(cfs_list_empty(&req->rq_list));
+       cfs_list_add_tail(&req->rq_list, &imp->imp_sending_list);
+       cfs_atomic_inc(&req->rq_import->imp_inflight);
+       spin_unlock(&imp->imp_lock);
 
         lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid());
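
The hunk above also shows the lock ordering this file follows: imp_lock is the
outer lock and the per-request rq_lock is only ever taken inside it.  Condensed
to a fragment (same fields as above; taking the locks in the opposite order
anywhere would risk deadlock):

    spin_lock(&imp->imp_lock);              /* outer: import state */
    spin_lock(&req->rq_lock);               /* inner: per-request flags */
    req->rq_waiting = 1;
    spin_unlock(&req->rq_lock);
    cfs_list_add_tail(&req->rq_list, &imp->imp_delayed_list);
    spin_unlock(&imp->imp_lock);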
 
@@ -1543,9 +1543,9 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                 }
 
                 if (req->rq_err) {
-                        cfs_spin_lock(&req->rq_lock);
-                        req->rq_replied = 0;
-                        cfs_spin_unlock(&req->rq_lock);
+                       spin_lock(&req->rq_lock);
+                       req->rq_replied = 0;
+                       spin_unlock(&req->rq_lock);
                         if (req->rq_status == 0)
                                 req->rq_status = -EIO;
                         ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
@@ -1576,15 +1576,15 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                                 if (!ptlrpc_unregister_reply(req, 1))
                                         continue;
 
-                                cfs_spin_lock(&imp->imp_lock);
-                                if (ptlrpc_import_delay_req(imp, req, &status)){
-                                        /* put on delay list - only if we wait
-                                         * recovery finished - before send */
-                                        cfs_list_del_init(&req->rq_list);
-                                        cfs_list_add_tail(&req->rq_list,
-                                                          &imp-> \
-                                                          imp_delayed_list);
-                                        cfs_spin_unlock(&imp->imp_lock);
+                               spin_lock(&imp->imp_lock);
+                               if (ptlrpc_import_delay_req(imp, req, &status)){
+                                       /* put on delay list - only if we wait
+                                        * for recovery to finish - before send */
+                                       cfs_list_del_init(&req->rq_list);
+                                       cfs_list_add_tail(&req->rq_list,
+                                                         &imp->
+                                                         imp_delayed_list);
+                                       spin_unlock(&imp->imp_lock);
                                         continue;
                                 }
 
@@ -1592,33 +1592,34 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                                         req->rq_status = status;
                                         ptlrpc_rqphase_move(req,
                                                 RQ_PHASE_INTERPRET);
-                                        cfs_spin_unlock(&imp->imp_lock);
-                                        GOTO(interpret, req->rq_status);
-                                }
-                                if (ptlrpc_no_resend(req) && !req->rq_wait_ctx) {
-                                        req->rq_status = -ENOTCONN;
-                                        ptlrpc_rqphase_move(req,
-                                                RQ_PHASE_INTERPRET);
-                                        cfs_spin_unlock(&imp->imp_lock);
-                                        GOTO(interpret, req->rq_status);
-                                }
-
-                                cfs_list_del_init(&req->rq_list);
-                                cfs_list_add_tail(&req->rq_list,
-                                              &imp->imp_sending_list);
-
-                                cfs_spin_unlock(&imp->imp_lock);
-
-                                cfs_spin_lock(&req->rq_lock);
-                                req->rq_waiting = 0;
-                                cfs_spin_unlock(&req->rq_lock);
-
-                                if (req->rq_timedout || req->rq_resend) {
-                                        /* This is re-sending anyways,
-                                         * let's mark req as resend. */
-                                        cfs_spin_lock(&req->rq_lock);
-                                        req->rq_resend = 1;
-                                        cfs_spin_unlock(&req->rq_lock);
+                                       spin_unlock(&imp->imp_lock);
+                                       GOTO(interpret, req->rq_status);
+                               }
+                               if (ptlrpc_no_resend(req) &&
+                                   !req->rq_wait_ctx) {
+                                       req->rq_status = -ENOTCONN;
+                                       ptlrpc_rqphase_move(req,
+                                                           RQ_PHASE_INTERPRET);
+                                       spin_unlock(&imp->imp_lock);
+                                       GOTO(interpret, req->rq_status);
+                               }
+
+                               cfs_list_del_init(&req->rq_list);
+                               cfs_list_add_tail(&req->rq_list,
+                                                 &imp->imp_sending_list);
+
+                               spin_unlock(&imp->imp_lock);
+
+                               spin_lock(&req->rq_lock);
+                               req->rq_waiting = 0;
+                               spin_unlock(&req->rq_lock);
+
+                               if (req->rq_timedout || req->rq_resend) {
+                                       /* This is re-sending anyway,
+                                        * so mark the req as resend. */
+                                       spin_lock(&req->rq_lock);
+                                       req->rq_resend = 1;
+                                       spin_unlock(&req->rq_lock);
                                         if (req->rq_bulk) {
                                                 __u64 old_xid;
 
@@ -1642,57 +1643,57 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                                 if (status) {
                                         if (req->rq_err) {
                                                 req->rq_status = status;
-                                                cfs_spin_lock(&req->rq_lock);
-                                                req->rq_wait_ctx = 0;
-                                                cfs_spin_unlock(&req->rq_lock);
-                                                force_timer_recalc = 1;
-                                        } else {
-                                                cfs_spin_lock(&req->rq_lock);
-                                                req->rq_wait_ctx = 1;
-                                                cfs_spin_unlock(&req->rq_lock);
-                                        }
-
-                                        continue;
-                                } else {
-                                        cfs_spin_lock(&req->rq_lock);
-                                        req->rq_wait_ctx = 0;
-                                        cfs_spin_unlock(&req->rq_lock);
-                                }
+                                               spin_lock(&req->rq_lock);
+                                               req->rq_wait_ctx = 0;
+                                               spin_unlock(&req->rq_lock);
+                                               force_timer_recalc = 1;
+                                       } else {
+                                               spin_lock(&req->rq_lock);
+                                               req->rq_wait_ctx = 1;
+                                               spin_unlock(&req->rq_lock);
+                                       }
+
+                                       continue;
+                               } else {
+                                       spin_lock(&req->rq_lock);
+                                       req->rq_wait_ctx = 0;
+                                       spin_unlock(&req->rq_lock);
+                               }
+
+                               rc = ptl_send_rpc(req, 0);
+                               if (rc) {
+                                       DEBUG_REQ(D_HA, req,
+                                                 "send failed: rc = %d", rc);
+                                       force_timer_recalc = 1;
+                                       spin_lock(&req->rq_lock);
+                                       req->rq_net_err = 1;
+                                       spin_unlock(&req->rq_lock);
+                               }
+                               /* need to reset the timeout */
+                               force_timer_recalc = 1;
+                       }
 
-                                rc = ptl_send_rpc(req, 0);
-                                if (rc) {
-                                        DEBUG_REQ(D_HA, req, "send failed (%d)",
-                                                  rc);
-                                        force_timer_recalc = 1;
-                                        cfs_spin_lock(&req->rq_lock);
-                                        req->rq_net_err = 1;
-                                        cfs_spin_unlock(&req->rq_lock);
-                                }
-                                /* need to reset the timeout */
-                                force_timer_recalc = 1;
-                        }
+                       spin_lock(&req->rq_lock);
 
-                        cfs_spin_lock(&req->rq_lock);
+                       if (ptlrpc_client_early(req)) {
+                               ptlrpc_at_recv_early_reply(req);
+                               spin_unlock(&req->rq_lock);
+                               continue;
+                       }
 
-                        if (ptlrpc_client_early(req)) {
-                                ptlrpc_at_recv_early_reply(req);
-                                cfs_spin_unlock(&req->rq_lock);
-                                continue;
-                        }
+                       /* Still waiting for a reply? */
+                       if (ptlrpc_client_recv(req)) {
+                               spin_unlock(&req->rq_lock);
+                               continue;
+                       }
 
-                        /* Still waiting for a reply? */
-                        if (ptlrpc_client_recv(req)) {
-                                cfs_spin_unlock(&req->rq_lock);
-                                continue;
-                        }
+                       /* Did we actually receive a reply? */
+                       if (!ptlrpc_client_replied(req)) {
+                               spin_unlock(&req->rq_lock);
+                               continue;
+                       }
 
-                        /* Did we actually receive a reply? */
-                        if (!ptlrpc_client_replied(req)) {
-                                cfs_spin_unlock(&req->rq_lock);
-                                continue;
-                        }
-
-                        cfs_spin_unlock(&req->rq_lock);
+                       spin_unlock(&req->rq_lock);
 
                         /* unlink from net because we are going to
                          * swab in-place of reply buffer */
@@ -1762,16 +1763,16 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                        libcfs_nid2str(imp->imp_connection->c_peer.nid),
                        lustre_msg_get_opc(req->rq_reqmsg));
 
-                cfs_spin_lock(&imp->imp_lock);
-                /* Request already may be not on sending or delaying list. This
-                 * may happen in the case of marking it erroneous for the case
-                 * ptlrpc_import_delay_req(req, status) find it impossible to
-                 * allow sending this rpc and returns *status != 0. */
-                if (!cfs_list_empty(&req->rq_list)) {
-                        cfs_list_del_init(&req->rq_list);
-                        cfs_atomic_dec(&imp->imp_inflight);
-                }
-                cfs_spin_unlock(&imp->imp_lock);
+               spin_lock(&imp->imp_lock);
+               /* The request may already be off the sending or delayed list.
+                * This can happen when it was marked erroneous because
+                * ptlrpc_import_delay_req(req, status) found it impossible to
+                * allow sending this RPC and returned *status != 0. */
+               if (!cfs_list_empty(&req->rq_list)) {
+                       cfs_list_del_init(&req->rq_list);
+                       cfs_atomic_dec(&imp->imp_inflight);
+               }
+               spin_unlock(&imp->imp_lock);
 
                 cfs_atomic_dec(&set->set_remaining);
                 cfs_waitq_broadcast(&imp->imp_recovery_waitq);
@@ -1784,10 +1785,10 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                        /* free the request that has just been completed
                         * in order not to pollute set->set_requests */
                        cfs_list_del_init(&req->rq_set_chain);
-                       cfs_spin_lock(&req->rq_lock);
+                       spin_lock(&req->rq_lock);
                        req->rq_set = NULL;
                        req->rq_invalid_rqset = 0;
-                       cfs_spin_unlock(&req->rq_lock);
+                       spin_unlock(&req->rq_lock);
 
                        /* record rq_status to compute the final status later */
                        if (req->rq_status != 0)
@@ -1808,13 +1809,13 @@ EXPORT_SYMBOL(ptlrpc_check_set);
  */
 int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
 {
-        struct obd_import *imp = req->rq_import;
-        int rc = 0;
-        ENTRY;
+       struct obd_import *imp = req->rq_import;
+       int rc = 0;
+       ENTRY;
 
-        cfs_spin_lock(&req->rq_lock);
-        req->rq_timedout = 1;
-        cfs_spin_unlock(&req->rq_lock);
+       spin_lock(&req->rq_lock);
+       req->rq_timedout = 1;
+       spin_unlock(&req->rq_lock);
 
        DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent "CFS_DURATION_T
                  "/real "CFS_DURATION_T"]",
@@ -1853,11 +1854,11 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
                 DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
                           ptlrpc_import_state_name(req->rq_send_state),
                           ptlrpc_import_state_name(imp->imp_state));
-                cfs_spin_lock(&req->rq_lock);
-                req->rq_status = -ETIMEDOUT;
-                req->rq_err = 1;
-                cfs_spin_unlock(&req->rq_lock);
-                RETURN(1);
+               spin_lock(&req->rq_lock);
+               req->rq_status = -ETIMEDOUT;
+               req->rq_err = 1;
+               spin_unlock(&req->rq_lock);
+               RETURN(1);
         }
 
         /* if a request can't be resent we can't wait for an answer after
@@ -1927,9 +1928,9 @@ EXPORT_SYMBOL(ptlrpc_expired_set);
  */
 void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
 {
-        cfs_spin_lock(&req->rq_lock);
-        req->rq_intr = 1;
-        cfs_spin_unlock(&req->rq_lock);
+       spin_lock(&req->rq_lock);
+       req->rq_intr = 1;
+       spin_unlock(&req->rq_lock);
 }
 EXPORT_SYMBOL(ptlrpc_mark_interrupted);
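
ptlrpc_mark_interrupted() is the smallest instance of a pattern this patch
touches throughout the file: a status flag is only ever written under rq_lock,
so any reader holding rq_lock sees a consistent set of flags.  A generic hedged
sketch (hypothetical structure):

    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct waited_obj {
            spinlock_t        lock;
            int               intr;   /* flag tested by the waiter */
            wait_queue_head_t wq;
    };

    static void mark_interrupted(struct waited_obj *o)
    {
            spin_lock(&o->lock);
            o->intr = 1;            /* published under the lock */
            spin_unlock(&o->lock);
            wake_up(&o->wq);        /* waiter re-checks o->intr */
    }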
 
@@ -2098,9 +2099,9 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
                         cfs_list_for_each(tmp, &set->set_requests) {
                                 req = cfs_list_entry(tmp, struct ptlrpc_request,
                                                      rq_set_chain);
-                                cfs_spin_lock(&req->rq_lock);
-                                req->rq_invalid_rqset = 1;
-                                cfs_spin_unlock(&req->rq_lock);
+                               spin_lock(&req->rq_lock);
+                               req->rq_invalid_rqset = 1;
+                               spin_unlock(&req->rq_lock);
                         }
                 }
         } while (rc != 0 || cfs_atomic_read(&set->set_remaining) != 0);
@@ -2166,11 +2167,11 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
         /* We must take it off the imp_replay_list first.  Otherwise, we'll set
          * request->rq_reqmsg to NULL while osc_close is dereferencing it. */
         if (request->rq_import != NULL) {
-                if (!locked)
-                        cfs_spin_lock(&request->rq_import->imp_lock);
-                cfs_list_del_init(&request->rq_replay_list);
-                if (!locked)
-                        cfs_spin_unlock(&request->rq_import->imp_lock);
+               if (!locked)
+                       spin_lock(&request->rq_import->imp_lock);
+               cfs_list_del_init(&request->rq_replay_list);
+               if (!locked)
+                       spin_unlock(&request->rq_import->imp_lock);
         }
         LASSERTF(cfs_list_empty(&request->rq_replay_list), "req %p\n", request);
 
@@ -2411,9 +2412,9 @@ void ptlrpc_free_committed(struct obd_import *imp)
                 DEBUG_REQ(D_INFO, req, "commit (last_committed "LPU64")",
                           imp->imp_peer_committed_transno);
 free_req:
-                cfs_spin_lock(&req->rq_lock);
-                req->rq_replay = 0;
-                cfs_spin_unlock(&req->rq_lock);
+               spin_lock(&req->rq_lock);
+               req->rq_replay = 0;
+               spin_unlock(&req->rq_lock);
                 if (req->rq_commit_cb != NULL)
                         req->rq_commit_cb(req);
                 cfs_list_del_init(&req->rq_replay_list);
@@ -2444,7 +2445,7 @@ void ptlrpc_resend_req(struct ptlrpc_request *req)
         lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 });
         req->rq_status = -EAGAIN;
 
-        cfs_spin_lock(&req->rq_lock);
+       spin_lock(&req->rq_lock);
         req->rq_resend = 1;
         req->rq_net_err = 0;
         req->rq_timedout = 0;
@@ -2457,21 +2458,21 @@ void ptlrpc_resend_req(struct ptlrpc_request *req)
                        old_xid, req->rq_xid);
         }
         ptlrpc_client_wake_req(req);
-        cfs_spin_unlock(&req->rq_lock);
+       spin_unlock(&req->rq_lock);
 }
 EXPORT_SYMBOL(ptlrpc_resend_req);
 
 /* XXX: this function and rq_status are currently unused */
 void ptlrpc_restart_req(struct ptlrpc_request *req)
 {
-        DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
-        req->rq_status = -ERESTARTSYS;
+       DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
+       req->rq_status = -ERESTARTSYS;
 
-        cfs_spin_lock(&req->rq_lock);
-        req->rq_restart = 1;
-        req->rq_timedout = 0;
-        ptlrpc_client_wake_req(req);
-        cfs_spin_unlock(&req->rq_lock);
+       spin_lock(&req->rq_lock);
+       req->rq_restart = 1;
+       req->rq_timedout = 0;
+       ptlrpc_client_wake_req(req);
+       spin_unlock(&req->rq_lock);
 }
 EXPORT_SYMBOL(ptlrpc_restart_req);
 
@@ -2607,13 +2608,13 @@ static int ptlrpc_replay_interpret(const struct lu_env *env,
 
         /** VBR: check version failure */
         if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
-                /** replay was failed due to version mismatch */
-                DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n");
-                cfs_spin_lock(&imp->imp_lock);
-                imp->imp_vbr_failed = 1;
-                imp->imp_no_lock_replay = 1;
-                cfs_spin_unlock(&imp->imp_lock);
-                lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
+               /** replay failed due to version mismatch */
+               DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n");
+               spin_lock(&imp->imp_lock);
+               imp->imp_vbr_failed = 1;
+               imp->imp_no_lock_replay = 1;
+               spin_unlock(&imp->imp_lock);
+               lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
         } else {
                 /** The transno had better not change over replay. */
                 LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) ==
@@ -2624,12 +2625,12 @@ static int ptlrpc_replay_interpret(const struct lu_env *env,
                          lustre_msg_get_transno(req->rq_repmsg));
         }
 
-        cfs_spin_lock(&imp->imp_lock);
-        /** if replays by version then gap was occur on server, no trust to locks */
-        if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY)
-                imp->imp_no_lock_replay = 1;
-        imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
-        cfs_spin_unlock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
+       /** if replaying by version, a gap occurred on server; don't trust locks */
+       if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY)
+               imp->imp_no_lock_replay = 1;
+       imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
+       spin_unlock(&imp->imp_lock);
         LASSERT(imp->imp_last_replay_transno);
 
         /* transaction number shouldn't be bigger than the latest replayed */
@@ -2730,7 +2731,7 @@ void ptlrpc_abort_inflight(struct obd_import *imp)
          * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
          * this flag and then putting requests on sending_list or delayed_list.
          */
-        cfs_spin_lock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
 
         /* XXX locking?  Maybe we should remove each request with the list
          * locked?  Also, how do we know if the requests on the list are
@@ -2742,38 +2743,38 @@ void ptlrpc_abort_inflight(struct obd_import *imp)
 
                 DEBUG_REQ(D_RPCTRACE, req, "inflight");
 
-                cfs_spin_lock (&req->rq_lock);
-                if (req->rq_import_generation < imp->imp_generation) {
-                        req->rq_err = 1;
+               spin_lock(&req->rq_lock);
+               if (req->rq_import_generation < imp->imp_generation) {
+                       req->rq_err = 1;
                        req->rq_status = -EIO;
-                        ptlrpc_client_wake_req(req);
-                }
-                cfs_spin_unlock (&req->rq_lock);
-        }
+                       ptlrpc_client_wake_req(req);
+               }
+               spin_unlock(&req->rq_lock);
+       }
 
-        cfs_list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
-                struct ptlrpc_request *req =
-                        cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
+       cfs_list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
+               struct ptlrpc_request *req =
+                       cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
 
-                DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
+               DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
 
-                cfs_spin_lock (&req->rq_lock);
-                if (req->rq_import_generation < imp->imp_generation) {
-                        req->rq_err = 1;
+               spin_lock(&req->rq_lock);
+               if (req->rq_import_generation < imp->imp_generation) {
+                       req->rq_err = 1;
                        req->rq_status = -EIO;
-                        ptlrpc_client_wake_req(req);
-                }
-                cfs_spin_unlock (&req->rq_lock);
-        }
+                       ptlrpc_client_wake_req(req);
+               }
+               spin_unlock(&req->rq_lock);
+       }
 
-        /* Last chance to free reqs left on the replay list, but we
-         * will still leak reqs that haven't committed.  */
-        if (imp->imp_replayable)
-                ptlrpc_free_committed(imp);
+       /* Last chance to free reqs left on the replay list, but we
+        * will still leak reqs that haven't committed.  */
+       if (imp->imp_replayable)
+               ptlrpc_free_committed(imp);
 
-        cfs_spin_unlock(&imp->imp_lock);
+       spin_unlock(&imp->imp_lock);
 
-        EXIT;
+       EXIT;
 }
 EXPORT_SYMBOL(ptlrpc_abort_inflight);
 
@@ -2791,21 +2792,21 @@ void ptlrpc_abort_set(struct ptlrpc_request_set *set)
                         cfs_list_entry(pos, struct ptlrpc_request,
                                        rq_set_chain);
 
-                cfs_spin_lock(&req->rq_lock);
-                if (req->rq_phase != RQ_PHASE_RPC) {
-                        cfs_spin_unlock(&req->rq_lock);
-                        continue;
-                }
+               spin_lock(&req->rq_lock);
+               if (req->rq_phase != RQ_PHASE_RPC) {
+                       spin_unlock(&req->rq_lock);
+                       continue;
+               }
 
-                req->rq_err = 1;
-                req->rq_status = -EINTR;
-                ptlrpc_client_wake_req(req);
-                cfs_spin_unlock(&req->rq_lock);
-        }
+               req->rq_err = 1;
+               req->rq_status = -EINTR;
+               ptlrpc_client_wake_req(req);
+               spin_unlock(&req->rq_lock);
+       }
 }
 
 static __u64 ptlrpc_last_xid;
-static cfs_spinlock_t ptlrpc_last_xid_lock;
+static spinlock_t ptlrpc_last_xid_lock;
 
 /**
  * Initialize the XID for the node.  This is common among all requests on
@@ -2827,7 +2828,7 @@ void ptlrpc_init_xid(void)
 {
         time_t now = cfs_time_current_sec();
 
-        cfs_spin_lock_init(&ptlrpc_last_xid_lock);
+       spin_lock_init(&ptlrpc_last_xid_lock);
         if (now < YEAR_2004) {
                 cfs_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
                 ptlrpc_last_xid >>= 2;
@@ -2842,11 +2843,11 @@ void ptlrpc_init_xid(void)
  */
 __u64 ptlrpc_next_xid(void)
 {
-        __u64 tmp;
-        cfs_spin_lock(&ptlrpc_last_xid_lock);
-        tmp = ++ptlrpc_last_xid;
-        cfs_spin_unlock(&ptlrpc_last_xid_lock);
-        return tmp;
+       __u64 tmp;
+       spin_lock(&ptlrpc_last_xid_lock);
+       tmp = ++ptlrpc_last_xid;
+       spin_unlock(&ptlrpc_last_xid_lock);
+       return tmp;
 }
 EXPORT_SYMBOL(ptlrpc_next_xid);
 
@@ -2857,15 +2858,15 @@ EXPORT_SYMBOL(ptlrpc_next_xid);
 __u64 ptlrpc_sample_next_xid(void)
 {
 #if BITS_PER_LONG == 32
-        /* need to avoid possible word tearing on 32-bit systems */
-        __u64 tmp;
-        cfs_spin_lock(&ptlrpc_last_xid_lock);
-        tmp = ptlrpc_last_xid + 1;
-        cfs_spin_unlock(&ptlrpc_last_xid_lock);
-        return tmp;
+       /* need to avoid possible word tearing on 32-bit systems */
+       __u64 tmp;
+       spin_lock(&ptlrpc_last_xid_lock);
+       tmp = ptlrpc_last_xid + 1;
+       spin_unlock(&ptlrpc_last_xid_lock);
+       return tmp;
 #else
-        /* No need to lock, since returned value is racy anyways */
-        return ptlrpc_last_xid + 1;
+       /* No need to lock, since the returned value is racy anyway */
+       return ptlrpc_last_xid + 1;
 #endif
 }
 EXPORT_SYMBOL(ptlrpc_sample_next_xid);
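
The BITS_PER_LONG == 32 branch above exists because a 64-bit load is not a
single instruction on 32-bit machines, so an unlocked read could observe the
two halves of ptlrpc_last_xid from different increments.  One hedged
alternative (not what this patch does) is atomic64_t, which hides the same
lock-or-no-lock decision inside the kernel:

    #include <linux/atomic.h>

    static atomic64_t last_xid = ATOMIC64_INIT(0);

    static u64 next_xid(void)
    {
            return atomic64_inc_return(&last_xid);
    }

    static u64 sample_next_xid(void)
    {
            /* atomic64_read() is tear-free even on 32-bit, where the
             * generic implementation falls back to an internal lock. */
            return atomic64_read(&last_xid) + 1;
    }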
@@ -2938,7 +2939,7 @@ void *ptlrpcd_alloc_work(struct obd_import *imp,
         req->rq_must_unlink = 0;
         req->rq_no_delay = req->rq_no_resend = 1;
 
-        cfs_spin_lock_init(&req->rq_lock);
+       spin_lock_init(&req->rq_lock);
         CFS_INIT_LIST_HEAD(&req->rq_list);
         CFS_INIT_LIST_HEAD(&req->rq_replay_list);
         CFS_INIT_LIST_HEAD(&req->rq_set_chain);
index 55cc32e..a58a255 100644 (file)
@@ -73,9 +73,9 @@ void request_out_callback(lnet_event_t *ev)
                 /* Failed send: make it seem like the reply timed out, just
                  * like failing sends in client.c does currently...  */
 
-                cfs_spin_lock(&req->rq_lock);
-                req->rq_net_err = 1;
-                cfs_spin_unlock(&req->rq_lock);
+               spin_lock(&req->rq_lock);
+               req->rq_net_err = 1;
+               spin_unlock(&req->rq_lock);
 
                 ptlrpc_client_wake_req(req);
         }
@@ -103,7 +103,7 @@ void reply_in_callback(lnet_event_t *ev)
            for adaptive timeouts' early reply. */
         LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);
 
-        cfs_spin_lock(&req->rq_lock);
+       spin_lock(&req->rq_lock);
 
         req->rq_receiving_reply = 0;
         req->rq_early = 0;
@@ -167,8 +167,8 @@ out_wake:
         /* NB don't unlock till after wakeup; req can disappear under us
          * since we don't have our own ref */
         ptlrpc_client_wake_req(req);
-        cfs_spin_unlock(&req->rq_lock);
-        EXIT;
+       spin_unlock(&req->rq_lock);
+       EXIT;
 }
 
 /*
@@ -198,7 +198,7 @@ void client_bulk_callback (lnet_event_t *ev)
                "event type %d, status %d, desc %p\n",
                ev->type, ev->status, desc);
 
-        cfs_spin_lock(&desc->bd_lock);
+       spin_lock(&desc->bd_lock);
         req = desc->bd_req;
         LASSERT(desc->bd_network_rw);
         desc->bd_network_rw = 0;
@@ -209,9 +209,9 @@ void client_bulk_callback (lnet_event_t *ev)
                 desc->bd_sender = ev->sender;
         } else {
                 /* start reconnect and resend if network error hit */
-                cfs_spin_lock(&req->rq_lock);
-                req->rq_net_err = 1;
-                cfs_spin_unlock(&req->rq_lock);
+               spin_lock(&req->rq_lock);
+               req->rq_net_err = 1;
+               spin_unlock(&req->rq_lock);
         }
 
         /* release the encrypted pages for write */
@@ -222,8 +222,8 @@ void client_bulk_callback (lnet_event_t *ev)
          * otherwise */
         ptlrpc_client_wake_req(req);
 
-        cfs_spin_unlock(&desc->bd_lock);
-        EXIT;
+       spin_unlock(&desc->bd_lock);
+       EXIT;
 }
 
 /*
@@ -338,7 +338,7 @@ void request_in_callback(lnet_event_t *ev)
         req->rq_self = ev->target.nid;
         req->rq_rqbd = rqbd;
         req->rq_phase = RQ_PHASE_NEW;
-        cfs_spin_lock_init(&req->rq_lock);
+       spin_lock_init(&req->rq_lock);
         CFS_INIT_LIST_HEAD(&req->rq_timed_list);
        CFS_INIT_LIST_HEAD(&req->rq_exp_list);
         cfs_atomic_set(&req->rq_refcount, 1);
@@ -348,7 +348,7 @@ void request_in_callback(lnet_event_t *ev)
 
         CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));
 
-       cfs_spin_lock(&svcpt->scp_lock);
+       spin_lock(&svcpt->scp_lock);
 
        ptlrpc_req_add_history(svcpt, req);
 
@@ -378,7 +378,7 @@ void request_in_callback(lnet_event_t *ev)
         * has been queued and we unlock, so do the wake now... */
        cfs_waitq_signal(&svcpt->scp_waitq);
 
-       cfs_spin_unlock(&svcpt->scp_lock);
+       spin_unlock(&svcpt->scp_lock);
        EXIT;
 }
 
@@ -410,8 +410,8 @@ void reply_out_callback(lnet_event_t *ev)
         if (ev->unlinked) {
                 /* Last network callback. The net's ref on 'rs' stays put
                  * until ptlrpc_handle_rs() is done with it */
-               cfs_spin_lock(&svcpt->scp_rep_lock);
-               cfs_spin_lock(&rs->rs_lock);
+               spin_lock(&svcpt->scp_rep_lock);
+               spin_lock(&rs->rs_lock);
 
                rs->rs_on_net = 0;
                if (!rs->rs_no_ack ||
@@ -419,8 +419,8 @@ void reply_out_callback(lnet_event_t *ev)
                    rs->rs_export->exp_obd->obd_last_committed)
                        ptlrpc_schedule_difficult_reply(rs);
 
-               cfs_spin_unlock(&rs->rs_lock);
-               cfs_spin_unlock(&svcpt->scp_rep_lock);
+               spin_unlock(&rs->rs_lock);
+               spin_unlock(&svcpt->scp_rep_lock);
        }
        EXIT;
 }
@@ -446,7 +446,7 @@ void server_bulk_callback (lnet_event_t *ev)
                "event type %d, status %d, desc %p\n",
                ev->type, ev->status, desc);
 
-        cfs_spin_lock(&desc->bd_lock);
+       spin_lock(&desc->bd_lock);
 
         if ((ev->type == LNET_EVENT_ACK ||
              ev->type == LNET_EVENT_REPLY) &&
@@ -465,8 +465,8 @@ void server_bulk_callback (lnet_event_t *ev)
                 cfs_waitq_signal(&desc->bd_waitq);
         }
 
-        cfs_spin_unlock(&desc->bd_lock);
-        EXIT;
+       spin_unlock(&desc->bd_lock);
+       EXIT;
 }
 #endif
 
@@ -803,7 +803,7 @@ int ptlrpc_init_portals(void)
                 liblustre_register_wait_callback("liblustre_check_services",
                                                  &liblustre_check_services,
                                                  NULL);
-        cfs_init_completion_module(liblustre_wait_event);
+       init_completion_module(liblustre_wait_event);
 #endif
         rc = ptlrpcd_addref();
         if (rc == 0)
index 8407314..c5d7348 100644 (file)
@@ -277,30 +277,30 @@ int gss_do_ctx_init_rpc(__user char *buffer, unsigned long count)
                 RETURN(-EINVAL);
         }
 
-        cfs_spin_lock(&obd->obd_dev_lock);
-        if (obd->obd_stopping) {
-                CERROR("obd %s has stopped\n", obdname);
-                cfs_spin_unlock(&obd->obd_dev_lock);
-                RETURN(-EINVAL);
-        }
-
-        if (strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
-            strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME) &&
-            strcmp(obd->obd_type->typ_name, LUSTRE_MGC_NAME)) {
-                CERROR("obd %s is not a client device\n", obdname);
-                cfs_spin_unlock(&obd->obd_dev_lock);
-                RETURN(-EINVAL);
-        }
-        cfs_spin_unlock(&obd->obd_dev_lock);
-
-        cfs_down_read(&obd->u.cli.cl_sem);
-        if (obd->u.cli.cl_import == NULL) {
-                CERROR("obd %s: import has gone\n", obd->obd_name);
-                cfs_up_read(&obd->u.cli.cl_sem);
-                RETURN(-EINVAL);
-        }
-        imp = class_import_get(obd->u.cli.cl_import);
-        cfs_up_read(&obd->u.cli.cl_sem);
+       spin_lock(&obd->obd_dev_lock);
+       if (obd->obd_stopping) {
+               CERROR("obd %s has stopped\n", obdname);
+               spin_unlock(&obd->obd_dev_lock);
+               RETURN(-EINVAL);
+       }
+
+       if (strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
+           strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME) &&
+           strcmp(obd->obd_type->typ_name, LUSTRE_MGC_NAME)) {
+               CERROR("obd %s is not a client device\n", obdname);
+               spin_unlock(&obd->obd_dev_lock);
+               RETURN(-EINVAL);
+       }
+       spin_unlock(&obd->obd_dev_lock);
+
+       down_read(&obd->u.cli.cl_sem);
+       if (obd->u.cli.cl_import == NULL) {
+               CERROR("obd %s: import has gone\n", obd->obd_name);
+               up_read(&obd->u.cli.cl_sem);
+               RETURN(-EINVAL);
+       }
+       imp = class_import_get(obd->u.cli.cl_import);
+       up_read(&obd->u.cli.cl_sem);
 
         if (imp->imp_deactive) {
                 CERROR("import has been deactivated\n");
index 7cb1fc2..d7b9584 100644 (file)
@@ -205,7 +205,7 @@ static inline __u64 gss_handle_to_u64(rawobj_t *handle)
                                          GSS_SEQ_WIN_MAIN / 4)
 
 struct gss_svc_seq_data {
-        cfs_spinlock_t          ssd_lock;
+       spinlock_t              ssd_lock;
         /*
          * highest sequence number seen so far, for main and back window
          */
@@ -277,10 +277,10 @@ struct gss_cli_ctx_keyring {
 };
 
 struct gss_sec {
-        struct ptlrpc_sec       gs_base;
-        struct gss_api_mech    *gs_mech;
-        cfs_spinlock_t          gs_lock;
-        __u64                   gs_rvs_hdl;
+       struct ptlrpc_sec       gs_base;
+       struct gss_api_mech     *gs_mech;
+       spinlock_t              gs_lock;
+       __u64                   gs_rvs_hdl;
 };
 
 struct gss_sec_pipefs {
@@ -308,10 +308,10 @@ struct gss_sec_keyring {
         /*
          * specially serialize upcalls for root context.
          */
-        cfs_mutex_t             gsk_root_uc_lock;
+       struct mutex                    gsk_root_uc_lock;
 
 #ifdef HAVE_KEYRING_UPCALL_SERIALIZED
-        cfs_mutex_t             gsk_uc_lock;        /* serialize upcalls */
+       struct mutex            gsk_uc_lock;    /* serialize upcalls */
 #endif
 };
 
index 932d814..5b00817 100644 (file)
@@ -122,14 +122,14 @@ static int sec_install_rctx_kr(struct ptlrpc_sec *sec,
 static inline void keyring_upcall_lock(struct gss_sec_keyring *gsec_kr)
 {
 #ifdef HAVE_KEYRING_UPCALL_SERIALIZED
-        cfs_mutex_lock(&gsec_kr->gsk_uc_lock);
+       mutex_lock(&gsec_kr->gsk_uc_lock);
 #endif
 }
 
 static inline void keyring_upcall_unlock(struct gss_sec_keyring *gsec_kr)
 {
 #ifdef HAVE_KEYRING_UPCALL_SERIALIZED
-        cfs_mutex_unlock(&gsec_kr->gsk_uc_lock);
+       mutex_unlock(&gsec_kr->gsk_uc_lock);
 #endif
 }
 
@@ -218,7 +218,7 @@ struct ptlrpc_cli_ctx *ctx_create_kr(struct ptlrpc_sec *sec,
         }
 
         ctx->cc_expire = cfs_time_current_sec() + KEYRING_UPCALL_TIMEOUT;
-        cfs_clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
+       clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
         cfs_atomic_inc(&ctx->cc_refcount); /* for the caller */
 
         return ctx;
@@ -235,7 +235,7 @@ static void ctx_destroy_kr(struct ptlrpc_cli_ctx *ctx)
         LASSERT(sec);
         LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
         LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
-        LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+       LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
         LASSERT(gctx_kr->gck_key == NULL);
 
         ctx_clear_timer_kr(ctx);
@@ -282,35 +282,35 @@ static void ctx_put_kr(struct ptlrpc_cli_ctx *ctx, int sync)
  *   - lock ctx -> unlist -> unlock ctx -> lock key -> unbind -> unlock key
  */
 
-static inline void spin_lock_if(cfs_spinlock_t *lock, int condition)
+static inline void spin_lock_if(spinlock_t *lock, int condition)
 {
-        if (condition)
-                cfs_spin_lock(lock);
+       if (condition)
+               spin_lock(lock);
 }
 
-static inline void spin_unlock_if(cfs_spinlock_t *lock, int condition)
+static inline void spin_unlock_if(spinlock_t *lock, int condition)
 {
-        if (condition)
-                cfs_spin_unlock(lock);
+       if (condition)
+               spin_unlock(lock);
 }
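
These two helpers let one function serve both callers that already hold
ps_lock and callers that do not, as ctx_enlist_kr() below does via its
'locked' argument.  A hedged usage sketch (the body is illustrative):

    static void enlist(struct ptlrpc_sec *sec, int already_locked)
    {
            spin_lock_if(&sec->ps_lock, !already_locked);
            /* ... mutate state protected by ps_lock ... */
            spin_unlock_if(&sec->ps_lock, !already_locked);
    }

The obvious caveat is that both calls must receive the same condition, which
is why such helpers usually stay file-local, as they do here.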
 
 static void ctx_enlist_kr(struct ptlrpc_cli_ctx *ctx, int is_root, int locked)
 {
-        struct ptlrpc_sec      *sec = ctx->cc_sec;
-        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+       struct ptlrpc_sec      *sec = ctx->cc_sec;
+       struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
 
-        LASSERT(!cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
-        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+       LASSERT(!test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+       LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
 
-        spin_lock_if(&sec->ps_lock, !locked);
+       spin_lock_if(&sec->ps_lock, !locked);
 
-        cfs_atomic_inc(&ctx->cc_refcount);
-        cfs_set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
-        cfs_hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
-        if (is_root)
-                gsec_kr->gsk_root_ctx = ctx;
+       cfs_atomic_inc(&ctx->cc_refcount);
+       set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+       cfs_hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
+       if (is_root)
+               gsec_kr->gsk_root_ctx = ctx;
 
-        spin_unlock_if(&sec->ps_lock, !locked);
+       spin_unlock_if(&sec->ps_lock, !locked);
 }
 
 /*
@@ -326,7 +326,7 @@ static int ctx_unlist_kr(struct ptlrpc_cli_ctx *ctx, int locked)
         struct gss_sec_keyring  *gsec_kr = sec2gsec_keyring(sec);
 
         /* if hashed bit has gone, leave the job to somebody who is doing it */
-        if (cfs_test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
+       if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
                 return 0;
 
         /* drop ref inside spin lock to prevent race with other operations */
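
The test_and_clear_bit() above is doing real work: of all racing callers,
exactly one sees the bit set and proceeds with the unlisting.  A minimal
hedged sketch of this claim-the-job idiom (names are illustrative):

    #include <linux/bitops.h>

    #define JOB_PENDING_BIT 0

    static unsigned long job_flags;

    static void maybe_do_job(void)
    {
            /* Atomic read-and-clear: only one racing thread wins. */
            if (!test_and_clear_bit(JOB_PENDING_BIT, &job_flags))
                    return;         /* somebody else took the job */

            /* ... exactly-once work ... */
    }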
@@ -367,7 +367,7 @@ static void bind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
 static void unbind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
 {
         LASSERT(key->payload.data == ctx);
-        LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+       LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
 
         /* must revoke the key, or others may treat it as newly created */
         key_revoke_locked(key);
@@ -479,10 +479,10 @@ static void dispose_ctx_list_kr(cfs_hlist_head_t *freelist)
 static
 struct ptlrpc_cli_ctx * sec_lookup_root_ctx_kr(struct ptlrpc_sec *sec)
 {
-        struct gss_sec_keyring  *gsec_kr = sec2gsec_keyring(sec);
-        struct ptlrpc_cli_ctx   *ctx = NULL;
+       struct gss_sec_keyring  *gsec_kr = sec2gsec_keyring(sec);
+       struct ptlrpc_cli_ctx   *ctx = NULL;
 
-        cfs_spin_lock(&sec->ps_lock);
+       spin_lock(&sec->ps_lock);
 
         ctx = gsec_kr->gsk_root_ctx;
 
@@ -510,9 +510,9 @@ struct ptlrpc_cli_ctx * sec_lookup_root_ctx_kr(struct ptlrpc_sec *sec)
                 cfs_atomic_inc(&ctx->cc_refcount);
         }
 
-        cfs_spin_unlock(&sec->ps_lock);
+       spin_unlock(&sec->ps_lock);
 
-        return ctx;
+       return ctx;
 }
 
 #define RVS_CTX_EXPIRE_NICE    (10)
@@ -530,7 +530,7 @@ void rvs_sec_install_root_ctx_kr(struct ptlrpc_sec *sec,
 
         LASSERT(sec_is_reverse(sec));
 
-        cfs_spin_lock(&sec->ps_lock);
+       spin_lock(&sec->ps_lock);
 
         now = cfs_time_current_sec();
 
@@ -551,7 +551,7 @@ void rvs_sec_install_root_ctx_kr(struct ptlrpc_sec *sec,
         if (key)
                 bind_key_ctx(key, new_ctx);
 
-        cfs_spin_unlock(&sec->ps_lock);
+       spin_unlock(&sec->ps_lock);
 }
 
 static void construct_key_desc(void *buf, int bufsize,
@@ -579,9 +579,9 @@ struct ptlrpc_sec * gss_sec_create_kr(struct obd_import *imp,
 
         CFS_INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
         gsec_kr->gsk_root_ctx = NULL;
-        cfs_mutex_init(&gsec_kr->gsk_root_uc_lock);
+       mutex_init(&gsec_kr->gsk_root_uc_lock);
 #ifdef HAVE_KEYRING_UPCALL_SERIALIZED
-        cfs_mutex_init(&gsec_kr->gsk_uc_lock);
+       mutex_init(&gsec_kr->gsk_uc_lock);
 #endif
 
         if (gss_sec_create_common(&gsec_kr->gsk_base, &gss_policy_keyring,
@@ -707,7 +707,7 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
          * the root upcall lock, make sure nobody else populated new root
          * context after last check. */
         if (is_root) {
-                cfs_mutex_lock(&gsec_kr->gsk_root_uc_lock);
+               mutex_lock(&gsec_kr->gsk_root_uc_lock);
 
                 ctx = sec_lookup_root_ctx_kr(sec);
                 if (ctx)
@@ -821,7 +821,7 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
         key_put(key);
 out:
         if (is_root)
-                cfs_mutex_unlock(&gsec_kr->gsk_root_uc_lock);
+               mutex_unlock(&gsec_kr->gsk_root_uc_lock);
         RETURN(ctx);
 }
 
@@ -896,7 +896,7 @@ void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec,
 
         gsec_kr = sec2gsec_keyring(sec);
 
-        cfs_spin_lock(&sec->ps_lock);
+       spin_lock(&sec->ps_lock);
         cfs_hlist_for_each_entry_safe(ctx, pos, next,
                                       &gsec_kr->gsk_clist, cc_cache) {
                 LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
@@ -915,9 +915,9 @@ void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec,
                               cfs_atomic_read(&ctx->cc_refcount) - 2);
                 }
 
-                cfs_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
-                if (!grace)
-                        cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+               set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
+               if (!grace)
+                       clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
 
                 cfs_atomic_inc(&ctx->cc_refcount);
 
@@ -928,10 +928,10 @@ void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec,
                         cfs_atomic_dec(&ctx->cc_refcount);
                 }
         }
-        cfs_spin_unlock(&sec->ps_lock);
+       spin_unlock(&sec->ps_lock);
 
-        dispose_ctx_list_kr(&freelist);
-        EXIT;
+       dispose_ctx_list_kr(&freelist);
+       EXIT;
 }
 
 static
@@ -964,7 +964,7 @@ void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec)
 
         CWARN("running gc\n");
 
-        cfs_spin_lock(&sec->ps_lock);
+       spin_lock(&sec->ps_lock);
         cfs_hlist_for_each_entry_safe(ctx, pos, next,
                                       &gsec_kr->gsk_clist, cc_cache) {
                 LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
@@ -979,11 +979,11 @@ void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec)
                         cfs_atomic_dec(&ctx->cc_refcount);
                 }
         }
-        cfs_spin_unlock(&sec->ps_lock);
+       spin_unlock(&sec->ps_lock);
 
-        dispose_ctx_list_kr(&freelist);
-        EXIT;
-        return;
+       dispose_ctx_list_kr(&freelist);
+       EXIT;
+       return;
 }
 
 static
@@ -996,7 +996,7 @@ int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq)
         time_t                  now = cfs_time_current_sec();
         ENTRY;
 
-        cfs_spin_lock(&sec->ps_lock);
+       spin_lock(&sec->ps_lock);
         cfs_hlist_for_each_entry_safe(ctx, pos, next,
                                   &gsec_kr->gsk_clist, cc_cache) {
                 struct key             *key;
@@ -1031,9 +1031,9 @@ int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq)
                            gss_handle_to_u64(&gctx->gc_svc_handle),
                            mech);
         }
-        cfs_spin_unlock(&sec->ps_lock);
+       spin_unlock(&sec->ps_lock);
 
-        RETURN(0);
+       RETURN(0);
 }
 
 /****************************************
@@ -1242,9 +1242,9 @@ int gss_kt_instantiate(struct key *key, const void *data, size_t datalen)
          */
         LASSERT(cfs_current()->signal->session_keyring);
 
-        cfs_lockdep_off();
+       lockdep_off();
         rc = key_link(cfs_current()->signal->session_keyring, key);
-        cfs_lockdep_on();
+       lockdep_on();
         if (unlikely(rc)) {
                 CERROR("failed to link key %08x to keyring %08x: %d\n",
                        key->serial,
@@ -1369,7 +1369,7 @@ out:
                 cli_ctx_expire(ctx);
 
                 if (rc != -ERESTART)
-                        cfs_set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+                       set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
         }
 
         /* let user space think it's a success */
index b11b267..6ca04c3 100644 (file)
@@ -73,7 +73,7 @@
 #include "gss_asn1.h"
 #include "gss_krb5.h"
 
-static cfs_spinlock_t krb5_seq_lock;
+static spinlock_t krb5_seq_lock;
 
 struct krb5_enctype {
         char           *ke_dispname;
@@ -773,9 +773,9 @@ static void fill_krb5_header(struct krb5_ctx *kctx,
         }
 
         khdr->kh_filler = 0xff;
-        cfs_spin_lock(&krb5_seq_lock);
-        khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
-        cfs_spin_unlock(&krb5_seq_lock);
+       spin_lock(&krb5_seq_lock);
+       khdr->kh_seq = cpu_to_be64(kctx->kc_seq_send++);
+       spin_unlock(&krb5_seq_lock);
 }
 
 static __u32 verify_krb5_header(struct krb5_ctx *kctx,
@@ -1822,14 +1822,14 @@ static struct gss_api_mech gss_kerberos_mech = {
 
 int __init init_kerberos_module(void)
 {
-        int status;
+       int status;
 
-        cfs_spin_lock_init(&krb5_seq_lock);
+       spin_lock_init(&krb5_seq_lock);
 
-        status = lgss_mech_register(&gss_kerberos_mech);
-        if (status)
-                CERROR("Failed to register kerberos gss mechanism!\n");
-        return status;
+       status = lgss_mech_register(&gss_kerberos_mech);
+       if (status)
+               CERROR("Failed to register kerberos gss mechanism!\n");
+       return status;
 }
 
 void __exit cleanup_kerberos_module(void)
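
krb5_seq_lock is still declared bare above and initialised at module load.
Since it now uses the plain kernel type, it could equally be statically
initialised, as the registered_mechs_lock hunk below already is (a hedged
simplification, not something this patch does):

    #include <linux/spinlock.h>

    /* Static initialisation removes any ordering dependency on module
     * init having run before the first fill_krb5_header() call. */
    static DEFINE_SPINLOCK(krb5_seq_lock);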
index 33f7773..ef6fed8 100644 (file)
@@ -68,19 +68,19 @@ static DEFINE_SPINLOCK(registered_mechs_lock);
 
 int lgss_mech_register(struct gss_api_mech *gm)
 {
-        cfs_spin_lock(&registered_mechs_lock);
-        cfs_list_add(&gm->gm_list, &registered_mechs);
-        cfs_spin_unlock(&registered_mechs_lock);
-        CWARN("Register %s mechanism\n", gm->gm_name);
-        return 0;
+       spin_lock(&registered_mechs_lock);
+       cfs_list_add(&gm->gm_list, &registered_mechs);
+       spin_unlock(&registered_mechs_lock);
+       CWARN("Register %s mechanism\n", gm->gm_name);
+       return 0;
 }
 
 void lgss_mech_unregister(struct gss_api_mech *gm)
 {
-        cfs_spin_lock(&registered_mechs_lock);
-        cfs_list_del(&gm->gm_list);
-        cfs_spin_unlock(&registered_mechs_lock);
-        CWARN("Unregister %s mechanism\n", gm->gm_name);
+       spin_lock(&registered_mechs_lock);
+       cfs_list_del(&gm->gm_list);
+       spin_unlock(&registered_mechs_lock);
+       CWARN("Unregister %s mechanism\n", gm->gm_name);
 }
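
The register/unregister pair above is the standard spinlock-protected registry:
a static list head plus DEFINE_SPINLOCK, with short, non-sleeping critical
sections.  A generic hedged sketch of the shape:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    static LIST_HEAD(registry);
    static DEFINE_SPINLOCK(registry_lock);

    struct mech {
            struct list_head list;
            const char      *name;
    };

    static void mech_register(struct mech *m)
    {
            spin_lock(&registry_lock);
            list_add(&m->list, &registry);
            spin_unlock(&registry_lock);
    }

    static void mech_unregister(struct mech *m)
    {
            spin_lock(&registry_lock);
            list_del(&m->list);
            spin_unlock(&registry_lock);
    }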
 
 
@@ -92,19 +92,19 @@ struct gss_api_mech *lgss_mech_get(struct gss_api_mech *gm)
 
 struct gss_api_mech *lgss_name_to_mech(char *name)
 {
-        struct gss_api_mech *pos, *gm = NULL;
-
-        cfs_spin_lock(&registered_mechs_lock);
-        cfs_list_for_each_entry(pos, &registered_mechs, gm_list) {
-                if (0 == strcmp(name, pos->gm_name)) {
-                        if (!cfs_try_module_get(pos->gm_owner))
-                                continue;
-                        gm = pos;
-                        break;
-                }
-        }
-        cfs_spin_unlock(&registered_mechs_lock);
-        return gm;
+       struct gss_api_mech *pos, *gm = NULL;
+
+       spin_lock(&registered_mechs_lock);
+       cfs_list_for_each_entry(pos, &registered_mechs, gm_list) {
+               if (0 == strcmp(name, pos->gm_name)) {
+                       if (!cfs_try_module_get(pos->gm_owner))
+                               continue;
+                       gm = pos;
+                       break;
+               }
+       }
+       spin_unlock(&registered_mechs_lock);
+       return gm;
 
 }
 
@@ -122,9 +122,9 @@ int mech_supports_subflavor(struct gss_api_mech *gm, __u32 subflavor)
 
 struct gss_api_mech *lgss_subflavor_to_mech(__u32 subflavor)
 {
-        struct gss_api_mech *pos, *gm = NULL;
+       struct gss_api_mech *pos, *gm = NULL;
 
-        cfs_spin_lock(&registered_mechs_lock);
+       spin_lock(&registered_mechs_lock);
         cfs_list_for_each_entry(pos, &registered_mechs, gm_list) {
                 if (!cfs_try_module_get(pos->gm_owner))
                         continue;
@@ -135,8 +135,8 @@ struct gss_api_mech *lgss_subflavor_to_mech(__u32 subflavor)
                 gm = pos;
                 break;
         }
-        cfs_spin_unlock(&registered_mechs_lock);
-        return gm;
+       spin_unlock(&registered_mechs_lock);
+       return gm;
 }
 
 void lgss_mech_put(struct gss_api_mech *gm)
index 9a434be..7214b39 100644 (file)
@@ -128,7 +128,7 @@ void ctx_destroy_pf(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx)
 static
 void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *hash)
 {
-        cfs_set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+       set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
         cfs_atomic_inc(&ctx->cc_refcount);
         cfs_hlist_add_head(&ctx->cc_cache, hash);
 }
@@ -141,10 +141,10 @@ void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *freelist)
 {
         LASSERT_SPIN_LOCKED(&ctx->cc_sec->ps_lock);
         LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
-        LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+       LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
         LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
 
-        cfs_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+       clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
 
         if (cfs_atomic_dec_and_test(&ctx->cc_refcount)) {
                 __cfs_hlist_del(&ctx->cc_cache);
@@ -176,7 +176,7 @@ int ctx_check_death_locked_pf(struct ptlrpc_cli_ctx *ctx,
 {
         LASSERT(ctx->cc_sec);
         LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
-        LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+       LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
 
         return ctx_check_death_pf(ctx, freelist);
 }
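
The cc_flags manipulation in these hunks is the kernel's atomic bitops API operating on an unsigned long; the cfs_ wrappers simply forwarded to these functions. A hedged sketch (DEMO_* bit numbers and demo_flags are invented):

#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_CACHED_BIT 0
#define DEMO_DEAD_BIT   1

static unsigned long demo_flags;

static void demo_cache_cycle(void)
{
        set_bit(DEMO_CACHED_BIT, &demo_flags);          /* was cfs_set_bit()   */
        if (test_bit(DEMO_CACHED_BIT, &demo_flags))     /* was cfs_test_bit()  */
                clear_bit(DEMO_CACHED_BIT, &demo_flags);/* was cfs_clear_bit() */
}

static bool demo_mark_dead(void)
{
        /* atomic read-modify-write: returns true only for the first caller */
        return !test_and_set_bit(DEMO_DEAD_BIT, &demo_flags);
}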
@@ -201,7 +201,7 @@ void ctx_list_destroy_pf(cfs_hlist_head_t *head)
                                       cc_cache);
 
                 LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
-                LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT,
+               LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT,
                                      &ctx->cc_flags) == 0);
 
                 cfs_hlist_del_init(&ctx->cc_cache);
@@ -226,23 +226,23 @@ int gss_cli_ctx_validate_pf(struct ptlrpc_cli_ctx *ctx)
 static
 void gss_cli_ctx_die_pf(struct ptlrpc_cli_ctx *ctx, int grace)
 {
-        LASSERT(ctx->cc_sec);
-        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+       LASSERT(ctx->cc_sec);
+       LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
 
-        cli_ctx_expire(ctx);
+       cli_ctx_expire(ctx);
 
-        cfs_spin_lock(&ctx->cc_sec->ps_lock);
+       spin_lock(&ctx->cc_sec->ps_lock);
 
-        if (cfs_test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
-                LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
-                LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 1);
+       if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
+               LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
+               LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 1);
 
-                cfs_hlist_del_init(&ctx->cc_cache);
-                if (cfs_atomic_dec_and_test(&ctx->cc_refcount))
-                        LBUG();
-        }
+               cfs_hlist_del_init(&ctx->cc_cache);
+               if (cfs_atomic_dec_and_test(&ctx->cc_refcount))
+                       LBUG();
+       }
 
-        cfs_spin_unlock(&ctx->cc_sec->ps_lock);
+       spin_unlock(&ctx->cc_sec->ps_lock);
 }
 
 /****************************************
@@ -272,7 +272,7 @@ void gss_sec_ctx_replace_pf(struct gss_sec *gsec,
                               (__u64) new->cc_vcred.vc_uid);
         LASSERT(hash < gsec_pf->gsp_chash_size);
 
-        cfs_spin_lock(&gsec->gs_base.ps_lock);
+       spin_lock(&gsec->gs_base.ps_lock);
 
         cfs_hlist_for_each_entry_safe(ctx, pos, next,
                                       &gsec_pf->gsp_chash[hash], cc_cache) {
@@ -286,7 +286,7 @@ void gss_sec_ctx_replace_pf(struct gss_sec *gsec,
 
         ctx_enhash_pf(new, &gsec_pf->gsp_chash[hash]);
 
-        cfs_spin_unlock(&gsec->gs_base.ps_lock);
+       spin_unlock(&gsec->gs_base.ps_lock);
 
         ctx_list_destroy_pf(&freelist);
         EXIT;
@@ -438,7 +438,7 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_pf(struct ptlrpc_sec *sec,
         LASSERT(hash < gsec_pf->gsp_chash_size);
 
 retry:
-        cfs_spin_lock(&sec->ps_lock);
+       spin_lock(&sec->ps_lock);
 
         /* gc_next == 0 means never do gc */
         if (remove_dead && sec->ps_gc_next &&
@@ -474,30 +474,30 @@ retry:
         } else {
                 /* don't allocate for reverse sec */
                 if (sec_is_reverse(sec)) {
-                        cfs_spin_unlock(&sec->ps_lock);
-                        RETURN(NULL);
-                }
-
-                if (new) {
-                        ctx_enhash_pf(new, hash_head);
-                        ctx = new;
-                } else if (create) {
-                        cfs_spin_unlock(&sec->ps_lock);
-                        new = ctx_create_pf(sec, vcred);
-                        if (new) {
-                                cfs_clear_bit(PTLRPC_CTX_NEW_BIT,
-                                              &new->cc_flags);
-                                goto retry;
-                        }
-                } else
-                        ctx = NULL;
-        }
-
-        /* hold a ref */
-        if (ctx)
-                cfs_atomic_inc(&ctx->cc_refcount);
-
-        cfs_spin_unlock(&sec->ps_lock);
+                       spin_unlock(&sec->ps_lock);
+                       RETURN(NULL);
+               }
+
+               if (new) {
+                       ctx_enhash_pf(new, hash_head);
+                       ctx = new;
+               } else if (create) {
+                       spin_unlock(&sec->ps_lock);
+                       new = ctx_create_pf(sec, vcred);
+                       if (new) {
+                               clear_bit(PTLRPC_CTX_NEW_BIT, &new->cc_flags);
+                               goto retry;
+                       }
+               } else {
+                       ctx = NULL;
+               }
+       }
+
+       /* hold a ref */
+       if (ctx)
+               cfs_atomic_inc(&ctx->cc_refcount);
+
+       spin_unlock(&sec->ps_lock);
 
         /* the allocator of the context must give the first push to refresh */
         if (new) {
@@ -514,13 +514,13 @@ void gss_sec_release_ctx_pf(struct ptlrpc_sec *sec,
                             struct ptlrpc_cli_ctx *ctx,
                             int sync)
 {
-        LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+       LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
         LASSERT(cfs_hlist_unhashed(&ctx->cc_cache));
 
         /* if required async, we must clear the UPTODATE bit to prevent extra
          * rpcs during destroy procedure. */
         if (!sync)
-                cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+               clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
 
         /* destroy this context */
         ctx_destroy_pf(sec, ctx);
@@ -554,7 +554,7 @@ int gss_sec_flush_ctx_cache_pf(struct ptlrpc_sec *sec,
         gsec = container_of(sec, struct gss_sec, gs_base);
         gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
 
-        cfs_spin_lock(&sec->ps_lock);
+       spin_lock(&sec->ps_lock);
         for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
                 cfs_hlist_for_each_entry_safe(ctx, pos, next,
                                               &gsec_pf->gsp_chash[i],
@@ -577,16 +577,16 @@ int gss_sec_flush_ctx_cache_pf(struct ptlrpc_sec *sec,
                         }
                         ctx_unhash_pf(ctx, &freelist);
 
-                        cfs_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
-                        if (!grace)
-                                cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT,
-                                              &ctx->cc_flags);
-                }
-        }
-        cfs_spin_unlock(&sec->ps_lock);
+                       set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
+                       if (!grace)
+                               clear_bit(PTLRPC_CTX_UPTODATE_BIT,
+                                         &ctx->cc_flags);
+               }
+       }
+       spin_unlock(&sec->ps_lock);
 
-        ctx_list_destroy_pf(&freelist);
-        RETURN(busy);
+       ctx_list_destroy_pf(&freelist);
+       RETURN(busy);
 }
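
The lookup path a few hunks up (gss_sec_lookup_ctx_pf) shows the classic unlock/allocate/retry dance: a sleeping allocation is not allowed under a spinlock, so ps_lock is dropped around ctx_create_pf() and the hash is re-searched afterwards, since it may have changed in the meantime. A reduced sketch under invented names:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_obj {
        struct list_head do_item;
        int              do_key;
};

static LIST_HEAD(demo_table);
static DEFINE_SPINLOCK(demo_table_lock);

static struct demo_obj *demo_lookup_or_create(int key)
{
        struct demo_obj *pos, *new = NULL;

retry:
        spin_lock(&demo_table_lock);
        list_for_each_entry(pos, &demo_table, do_item) {
                if (pos->do_key == key) {
                        spin_unlock(&demo_table_lock);
                        kfree(new);     /* lost the race; discard ours */
                        return pos;
                }
        }
        if (new) {                      /* second pass: insert our object */
                list_add(&new->do_item, &demo_table);
                spin_unlock(&demo_table_lock);
                return new;
        }
        spin_unlock(&demo_table_lock);  /* cannot kmalloc under a spinlock */
        new = kmalloc(sizeof(*new), GFP_KERNEL);
        if (new == NULL)
                return NULL;
        new->do_key = key;
        goto retry;
}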
 
 /****************************************
@@ -665,18 +665,18 @@ static struct dentry *de_pipes[MECH_MAX] = { NULL, };
 /* all upcall messages linked here */
 static cfs_list_t upcall_lists[MECH_MAX];
 /* and protected by this */
-static cfs_spinlock_t upcall_locks[MECH_MAX];
+static spinlock_t upcall_locks[MECH_MAX];
 
 static inline
 void upcall_list_lock(int idx)
 {
-        cfs_spin_lock(&upcall_locks[idx]);
+       spin_lock(&upcall_locks[idx]);
 }
 
 static inline
 void upcall_list_unlock(int idx)
 {
-        cfs_spin_unlock(&upcall_locks[idx]);
+       spin_unlock(&upcall_locks[idx]);
 }
 
 static
@@ -761,7 +761,7 @@ void gss_msg_fail_ctx(struct gss_upcall_msg *gmsg)
 
                 LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
                 sptlrpc_cli_ctx_expire(ctx);
-                cfs_set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+               set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
         }
 }
 
@@ -923,11 +923,11 @@ ssize_t gss_pipe_downcall(struct file *filp, const char *src, size_t mlen)
                 ctx = &gctx->gc_base;
                 sptlrpc_cli_ctx_expire(ctx);
                 if (rc != -ERESTART || gss_err != GSS_S_COMPLETE)
-                        cfs_set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+                       set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
 
                 CERROR("refresh ctx %p(uid %d) failed: %d/0x%08x: %s\n",
                        ctx, ctx->cc_vcred.vc_uid, rc, gss_err,
-                       cfs_test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags) ?
+                      test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags) ?
                        "fatal error" : "non-fatal");
         }
 
@@ -1209,9 +1209,9 @@ int __init gss_init_pipefs_upcall(void)
 
         de_pipes[MECH_KRB5] = de;
         CFS_INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]);
-        cfs_spin_lock_init(&upcall_locks[MECH_KRB5]);
+       spin_lock_init(&upcall_locks[MECH_KRB5]);
 
-        return 0;
+       return 0;
 }
 
 static
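
Note the two initialisation styles visible in this patch: locks embedded in runtime-set-up objects or arrays use spin_lock_init(), while file-scope locks can be defined already initialised. A brief sketch (demo_* names are illustrative):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_static_lock);       /* initialised at compile time */

struct demo_ctx {
        spinlock_t dc_lock;
};

static void demo_ctx_init(struct demo_ctx *dc)
{
        spin_lock_init(&dc->dc_lock);           /* was cfs_spin_lock_init() */
}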
index 3ad70b3..4e663cb 100644
 
 #define GSS_SVC_UPCALL_TIMEOUT  (20)
 
-static cfs_spinlock_t __ctx_index_lock;
+static spinlock_t __ctx_index_lock;
 static __u64 __ctx_index;
 
 __u64 gss_get_next_ctx_index(void)
 {
-        __u64 idx;
+       __u64 idx;
 
-        cfs_spin_lock(&__ctx_index_lock);
-        idx = __ctx_index++;
-        cfs_spin_unlock(&__ctx_index_lock);
+       spin_lock(&__ctx_index_lock);
+       idx = __ctx_index++;
+       spin_unlock(&__ctx_index_lock);
 
-        return idx;
+       return idx;
 }
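
gss_get_next_ctx_index() is a spinlock-guarded monotonic counter, unchanged by this patch apart from the wrapper rename. In miniature (demo_* names invented; a lone counter like this could also be an atomic64_t, but the lock form mirrors the code):

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(demo_index_lock);
static u64 demo_index;

static u64 demo_next_index(void)
{
        u64 idx;

        spin_lock(&demo_index_lock);
        idx = demo_index++;     /* read and increment in one critical section */
        spin_unlock(&demo_index_lock);
        return idx;
}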
 
 static inline unsigned long hash_mem(char *buf, int length, int bits)
@@ -446,7 +446,7 @@ static inline void __rsc_update(struct rsc *new, struct rsc *tmp)
         tmp->ctx.gsc_mechctx = NULL;
 
         memset(&new->ctx.gsc_seqdata, 0, sizeof(new->ctx.gsc_seqdata));
-        cfs_spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
+       spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
 }
 
 static void rsc_put(struct kref *ref)
@@ -567,7 +567,7 @@ static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
                 goto out;
         if (rv == -ENOENT) {
                 CERROR("NOENT? set rsc entry negative\n");
-                cfs_set_bit(CACHE_NEGATIVE, &rsci.h.flags);
+               set_bit(CACHE_NEGATIVE, &rsci.h.flags);
         } else {
                 rawobj_t tmp_buf;
                 unsigned long ctx_expiry;
@@ -674,7 +674,7 @@ static void rsc_flush(rsc_entry_match *match, long data)
         int n;
         ENTRY;
 
-        cfs_write_lock(&rsc_cache.hash_lock);
+       write_lock(&rsc_cache.hash_lock);
         for (n = 0; n < RSC_HASHMAX; n++) {
                 for (ch = &rsc_cache.hash_table[n]; *ch;) {
                         rscp = container_of(*ch, struct rsc, h);
@@ -688,12 +688,12 @@ static void rsc_flush(rsc_entry_match *match, long data)
                         *ch = (*ch)->next;
                         rscp->h.next = NULL;
                         cache_get(&rscp->h);
-                        cfs_set_bit(CACHE_NEGATIVE, &rscp->h.flags);
+                       set_bit(CACHE_NEGATIVE, &rscp->h.flags);
                         COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
                         rsc_cache.entries--;
                 }
         }
-        cfs_write_unlock(&rsc_cache.hash_lock);
+       write_unlock(&rsc_cache.hash_lock);
         EXIT;
 }
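
rsc_cache.hash_lock is a kernel rwlock_t: the flush takes write_lock() for exclusive access, while the validity probe further down takes read_lock() so lookups can proceed concurrently. Illustrative sketch with invented names:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(demo_hash_lock);
static int demo_entries;

static int demo_count(void)             /* readers may run in parallel */
{
        int n;

        read_lock(&demo_hash_lock);
        n = demo_entries;
        read_unlock(&demo_hash_lock);
        return n;
}

static void demo_insert(void)           /* writers are exclusive */
{
        write_lock(&demo_hash_lock);    /* was cfs_write_lock() */
        demo_entries++;
        write_unlock(&demo_hash_lock);  /* was cfs_write_unlock() */
}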
 
@@ -912,7 +912,7 @@ cache_check:
                         first_check = 0;
 
                         read_lock(&rsi_cache.hash_lock);
-                        valid = cfs_test_bit(CACHE_VALID, &rsip->h.flags);
+                       valid = test_bit(CACHE_VALID, &rsip->h.flags);
                         if (valid == 0)
                                 cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
                         read_unlock(&rsi_cache.hash_lock);
@@ -1019,7 +1019,7 @@ out:
         if (rsci) {
                 /* if anything went wrong, we don't keep the context too */
                 if (rc != SECSVC_OK)
-                        cfs_set_bit(CACHE_NEGATIVE, &rsci->h.flags);
+                       set_bit(CACHE_NEGATIVE, &rsci->h.flags);
                 else
                         CDEBUG(D_SEC, "create rsc with idx "LPX64"\n",
                                gss_handle_to_u64(&rsci->handle));
@@ -1057,16 +1057,16 @@ void gss_svc_upcall_destroy_ctx(struct gss_svc_ctx *ctx)
         struct rsc *rsc = container_of(ctx, struct rsc, ctx);
 
         /* can't be found */
-        cfs_set_bit(CACHE_NEGATIVE, &rsc->h.flags);
+       set_bit(CACHE_NEGATIVE, &rsc->h.flags);
         /* to be removed at next scan */
         rsc->h.expiry_time = 1;
 }
 
 int __init gss_init_svc_upcall(void)
 {
-        int     i;
+       int     i;
 
-        cfs_spin_lock_init(&__ctx_index_lock);
+       spin_lock_init(&__ctx_index_lock);
         /*
          * this helps reduce context index conflicts. after server reboot,
          * conflicting request from clients might be filtered out by initial
index 974546a..fd0b7b1 100644
@@ -64,7 +64,7 @@ static struct proc_dir_entry *gss_proc_lk = NULL;
  * statistic of "out-of-sequence-window"
  */
 static struct {
-        cfs_spinlock_t  oos_lock;
+       spinlock_t  oos_lock;
         cfs_atomic_t    oos_cli_count;       /* client occurrence */
         int             oos_cli_behind;      /* client max seqs behind */
         cfs_atomic_t    oos_svc_replay[3];   /* server replay detected */
@@ -78,12 +78,12 @@ static struct {
 
 void gss_stat_oos_record_cli(int behind)
 {
-        cfs_atomic_inc(&gss_stat_oos.oos_cli_count);
+       cfs_atomic_inc(&gss_stat_oos.oos_cli_count);
 
-        cfs_spin_lock(&gss_stat_oos.oos_lock);
-        if (behind > gss_stat_oos.oos_cli_behind)
-                gss_stat_oos.oos_cli_behind = behind;
-        cfs_spin_unlock(&gss_stat_oos.oos_lock);
+       spin_lock(&gss_stat_oos.oos_lock);
+       if (behind > gss_stat_oos.oos_cli_behind)
+               gss_stat_oos.oos_cli_behind = behind;
+       spin_unlock(&gss_stat_oos.oos_lock);
 }
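
The out-of-sequence statistics above mix both primitives deliberately: a plain event counter is a single atomic increment, but updating a running maximum is a read-compare-write and still needs the spinlock. A sketch under assumed names:

#include <linux/atomic.h>
#include <linux/spinlock.h>

static atomic_t demo_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(demo_stat_lock);
static int demo_max_behind;

static void demo_record(int behind)
{
        atomic_inc(&demo_count);        /* one RMW, no lock required */

        spin_lock(&demo_stat_lock);     /* compare and store must not race */
        if (behind > demo_max_behind)
                demo_max_behind = behind;
        spin_unlock(&demo_stat_lock);
}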
 
 void gss_stat_oos_record_svc(int phase, int replay)
@@ -194,9 +194,9 @@ void gss_exit_lproc(void)
 
 int gss_init_lproc(void)
 {
-        int     rc;
+       int     rc;
 
-        cfs_spin_lock_init(&gss_stat_oos.oos_lock);
+       spin_lock_init(&gss_stat_oos.oos_lock);
 
         gss_proc_root = lprocfs_register("gss", sptlrpc_proc_root,
                                          gss_lprocfs_vars, NULL);
index a428743..cae8339 100644
@@ -331,9 +331,9 @@ int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
 {
         LASSERT(cfs_atomic_read(&ctx->cc_refcount));
 
-        if (!cfs_test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
+       if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
                 if (!ctx->cc_early_expire)
-                        cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+                       clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
 
                 CWARN("ctx %p(%u->%s) get expired: %lu(%+lds)\n",
                       ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
@@ -387,7 +387,7 @@ void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
          * someone else, in which case nobody will make further use
           * of it. we don't care; marking it UPTODATE will help destroy
           * the server-side context when this one is destroyed. */
-        cfs_set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+       set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
 
         if (sec_is_reverse(ctx->cc_sec)) {
                 CWARN("server installed reverse ctx %p idx "LPX64", "
@@ -509,7 +509,7 @@ int gss_do_check_seq(unsigned long *window, __u32 win_size, __u32 *max_seq,
                  */
                 switch (phase) {
                 case 0:
-                        if (cfs_test_bit(seq_num % win_size, window))
+                       if (test_bit(seq_num % win_size, window))
                                 goto replay;
                         break;
                 case 1:
@@ -539,9 +539,9 @@ replay:
  */
 int gss_check_seq_num(struct gss_svc_seq_data *ssd, __u32 seq_num, int set)
 {
-        int rc = 0;
+       int rc = 0;
 
-        cfs_spin_lock(&ssd->ssd_lock);
+       spin_lock(&ssd->ssd_lock);
 
         if (set == 0) {
                 /*
@@ -575,8 +575,8 @@ int gss_check_seq_num(struct gss_svc_seq_data *ssd, __u32 seq_num, int set)
                         gss_stat_oos_record_svc(2, 0);
         }
 exit:
-        cfs_spin_unlock(&ssd->ssd_lock);
-        return rc;
+       spin_unlock(&ssd->ssd_lock);
+       return rc;
 }
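
The sequence check above probes a replay window: a bitmap of recently seen sequence numbers, tested and set under ssd_lock. A much-reduced sketch that omits the window sliding against max_seq done by the real code (names, return values, and window size are illustrative):

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define DEMO_WIN_SIZE   64      /* a multiple of BITS_PER_LONG */

static DEFINE_SPINLOCK(demo_seq_lock);
static unsigned long demo_window[DEMO_WIN_SIZE / BITS_PER_LONG];

static int demo_check_seq(u32 seq)      /* 0 = fresh, -EACCES = replay */
{
        int rc = 0;

        spin_lock(&demo_seq_lock);
        if (test_bit(seq % DEMO_WIN_SIZE, demo_window))
                rc = -EACCES;           /* bit already set: replay */
        else
                set_bit(seq % DEMO_WIN_SIZE, demo_window);
        spin_unlock(&demo_seq_lock);
        return rc;
}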
 
 /***************************************
@@ -1117,7 +1117,7 @@ int gss_sec_create_common(struct gss_sec *gsec,
                 return -EOPNOTSUPP;
         }
 
-        cfs_spin_lock_init(&gsec->gs_lock);
+       spin_lock_init(&gsec->gs_lock);
         gsec->gs_rvs_hdl = 0ULL;
 
         /* initialize upper ptlrpc_sec */
@@ -1128,7 +1128,7 @@ int gss_sec_create_common(struct gss_sec *gsec,
         sec->ps_id = sptlrpc_get_next_secid();
         sec->ps_flvr = *sf;
         sec->ps_import = class_import_get(imp);
-        cfs_spin_lock_init(&sec->ps_lock);
+       spin_lock_init(&sec->ps_lock);
         CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
 
         if (!svcctx) {
@@ -1192,7 +1192,7 @@ int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
         ctx->cc_expire = 0;
         ctx->cc_flags = PTLRPC_CTX_NEW;
         ctx->cc_vcred = *vcred;
-        cfs_spin_lock_init(&ctx->cc_lock);
+       spin_lock_init(&ctx->cc_lock);
         CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
         CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
 
index d93c17a..4fa84f5 100644
@@ -86,11 +86,11 @@ do {                                                                           \
         }                                                                      \
 } while(0)
 
-#define IMPORT_SET_STATE(imp, state)            \
-do {                                            \
-        cfs_spin_lock(&imp->imp_lock);          \
-        IMPORT_SET_STATE_NOLOCK(imp, state);    \
-        cfs_spin_unlock(&imp->imp_lock);        \
+#define IMPORT_SET_STATE(imp, state)                                   \
+do {                                                                   \
+       spin_lock(&imp->imp_lock);                                      \
+       IMPORT_SET_STATE_NOLOCK(imp, state);                            \
+       spin_unlock(&imp->imp_lock);                                    \
 } while(0)
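
IMPORT_SET_STATE is the usual lock-wrapping statement macro: take imp_lock, call the _NOLOCK variant, unlock, all inside do { } while (0) so the macro behaves as a single statement after an if/else. A hypothetical reduction:

#include <linux/spinlock.h>

struct demo_import {
        spinlock_t      di_lock;
        int             di_state;
};

#define DEMO_SET_STATE_NOLOCK(di, s)    ((di)->di_state = (s))

#define DEMO_SET_STATE(di, s)                   \
do {                                            \
        spin_lock(&(di)->di_lock);              \
        DEMO_SET_STATE_NOLOCK(di, s);           \
        spin_unlock(&(di)->di_lock);            \
} while (0)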
 
 
@@ -106,14 +106,14 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
  * though. */
 int ptlrpc_init_import(struct obd_import *imp)
 {
-        cfs_spin_lock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
 
-        imp->imp_generation++;
-        imp->imp_state =  LUSTRE_IMP_NEW;
+       imp->imp_generation++;
+       imp->imp_state =  LUSTRE_IMP_NEW;
 
-        cfs_spin_unlock(&imp->imp_lock);
+       spin_unlock(&imp->imp_lock);
 
-        return 0;
+       return 0;
 }
 EXPORT_SYMBOL(ptlrpc_init_import);
 
@@ -147,9 +147,9 @@ EXPORT_SYMBOL(deuuidify);
  */
 int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
 {
-        int rc = 0;
+       int rc = 0;
 
-        cfs_spin_lock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
 
         if (imp->imp_state == LUSTRE_IMP_FULL &&
             (conn_cnt == 0 || conn_cnt == imp->imp_conn_cnt)) {
@@ -175,15 +175,15 @@ int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
                 }
                 ptlrpc_deactivate_timeouts(imp);
                 IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
-                cfs_spin_unlock(&imp->imp_lock);
+               spin_unlock(&imp->imp_lock);
 
-                if (obd_dump_on_timeout)
-                        libcfs_debug_dumplog();
+               if (obd_dump_on_timeout)
+                       libcfs_debug_dumplog();
 
-                obd_import_event(imp->imp_obd, imp, IMP_EVENT_DISCON);
-                rc = 1;
-        } else {
-                cfs_spin_unlock(&imp->imp_lock);
+               obd_import_event(imp->imp_obd, imp, IMP_EVENT_DISCON);
+               rc = 1;
+       } else {
+               spin_unlock(&imp->imp_lock);
                 CDEBUG(D_HA, "%s: import %p already %s (conn %u, was %u): %s\n",
                        imp->imp_client->cli_name, imp,
                        (imp->imp_state == LUSTRE_IMP_FULL &&
@@ -198,18 +198,18 @@ int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
 /* Must be called with imp_lock held! */
 static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp)
 {
-        ENTRY;
-        LASSERT_SPIN_LOCKED(&imp->imp_lock);
+       ENTRY;
+       LASSERT_SPIN_LOCKED(&imp->imp_lock);
 
-        CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
-        imp->imp_invalid = 1;
-        imp->imp_generation++;
-        cfs_spin_unlock(&imp->imp_lock);
+       CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
+       imp->imp_invalid = 1;
+       imp->imp_generation++;
+       spin_unlock(&imp->imp_lock);
 
-        ptlrpc_abort_inflight(imp);
-        obd_import_event(imp->imp_obd, imp, IMP_EVENT_INACTIVE);
+       ptlrpc_abort_inflight(imp);
+       obd_import_event(imp->imp_obd, imp, IMP_EVENT_INACTIVE);
 
-        EXIT;
+       EXIT;
 }
 
 /*
@@ -218,8 +218,8 @@ static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp)
  */
 void ptlrpc_deactivate_import(struct obd_import *imp)
 {
-        cfs_spin_lock(&imp->imp_lock);
-        ptlrpc_deactivate_and_unlock_import(imp);
+       spin_lock(&imp->imp_lock);
+       ptlrpc_deactivate_and_unlock_import(imp);
 }
 EXPORT_SYMBOL(ptlrpc_deactivate_import);
 
@@ -249,18 +249,18 @@ ptlrpc_inflight_deadline(struct ptlrpc_request *req, time_t now)
 
 static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp)
 {
-        time_t now = cfs_time_current_sec();
-        cfs_list_t *tmp, *n;
-        struct ptlrpc_request *req;
-        unsigned int timeout = 0;
-
-        cfs_spin_lock(&imp->imp_lock);
-        cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) {
-                req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
-                timeout = max(ptlrpc_inflight_deadline(req, now), timeout);
-        }
-        cfs_spin_unlock(&imp->imp_lock);
-        return timeout;
+       time_t now = cfs_time_current_sec();
+       cfs_list_t *tmp, *n;
+       struct ptlrpc_request *req;
+       unsigned int timeout = 0;
+
+       spin_lock(&imp->imp_lock);
+       cfs_list_for_each_safe(tmp, n, &imp->imp_sending_list) {
+               req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
+               timeout = max(ptlrpc_inflight_deadline(req, now), timeout);
+       }
+       spin_unlock(&imp->imp_lock);
+       return timeout;
 }
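
ptlrpc_inflight_timeout() walks the sending list under imp_lock and folds the per-request deadlines into a single maximum. The same shape, with invented names:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_req {
        struct list_head dr_list;
        unsigned int     dr_deadline;
};

static LIST_HEAD(demo_sending);
static DEFINE_SPINLOCK(demo_imp_lock);

static unsigned int demo_inflight_timeout(void)
{
        struct demo_req *req;
        unsigned int timeout = 0;

        spin_lock(&demo_imp_lock);      /* the list may not change mid-walk */
        list_for_each_entry(req, &demo_sending, dr_list)
                timeout = max(req->dr_deadline, timeout);
        spin_unlock(&demo_imp_lock);
        return timeout;
}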
 
 /**
@@ -323,7 +323,7 @@ void ptlrpc_invalidate_import(struct obd_import *imp)
                                cli_tgt, rc,
                                cfs_atomic_read(&imp->imp_inflight));
 
-                        cfs_spin_lock(&imp->imp_lock);
+                       spin_lock(&imp->imp_lock);
                         if (cfs_atomic_read(&imp->imp_inflight) == 0) {
                                 int count = cfs_atomic_read(&imp->imp_unregistering);
 
@@ -365,7 +365,7 @@ void ptlrpc_invalidate_import(struct obd_import *imp)
                                        cfs_atomic_read(&imp->
                                                        imp_unregistering));
                         }
-                        cfs_spin_unlock(&imp->imp_lock);
+                       spin_unlock(&imp->imp_lock);
                   }
         } while (rc != 0);
 
@@ -385,13 +385,13 @@ EXPORT_SYMBOL(ptlrpc_invalidate_import);
 /* unset imp_invalid */
 void ptlrpc_activate_import(struct obd_import *imp)
 {
-        struct obd_device *obd = imp->imp_obd;
+       struct obd_device *obd = imp->imp_obd;
 
-        cfs_spin_lock(&imp->imp_lock);
-        imp->imp_invalid = 0;
-        ptlrpc_activate_timeouts(imp);
-        cfs_spin_unlock(&imp->imp_lock);
-        obd_import_event(obd, imp, IMP_EVENT_ACTIVE);
+       spin_lock(&imp->imp_lock);
+       imp->imp_invalid = 0;
+       ptlrpc_activate_timeouts(imp);
+       spin_unlock(&imp->imp_lock);
+       obd_import_event(obd, imp, IMP_EVENT_ACTIVE);
 }
 EXPORT_SYMBOL(ptlrpc_activate_import);
 
@@ -414,13 +414,13 @@ void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt)
                 CDEBUG(D_HA, "%s: waking up pinger\n",
                        obd2cli_tgt(imp->imp_obd));
 
-                cfs_spin_lock(&imp->imp_lock);
-                imp->imp_force_verify = 1;
-                cfs_spin_unlock(&imp->imp_lock);
+               spin_lock(&imp->imp_lock);
+               imp->imp_force_verify = 1;
+               spin_unlock(&imp->imp_lock);
 
-                ptlrpc_pinger_wake_up();
-        }
-        EXIT;
+               ptlrpc_pinger_wake_up();
+       }
+       EXIT;
 }
 EXPORT_SYMBOL(ptlrpc_fail_import);
 
@@ -466,14 +466,14 @@ static int import_select_connection(struct obd_import *imp)
         int target_len, tried_all = 1;
         ENTRY;
 
-        cfs_spin_lock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
 
-        if (cfs_list_empty(&imp->imp_conn_list)) {
-                CERROR("%s: no connections available\n",
-                        imp->imp_obd->obd_name);
-                cfs_spin_unlock(&imp->imp_lock);
-                RETURN(-EINVAL);
-        }
+       if (cfs_list_empty(&imp->imp_conn_list)) {
+               CERROR("%s: no connections available\n",
+                      imp->imp_obd->obd_name);
+               spin_unlock(&imp->imp_lock);
+               RETURN(-EINVAL);
+       }
 
         cfs_list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
                 CDEBUG(D_HA, "%s: connect to NID %s last attempt "LPU64"\n",
@@ -559,9 +559,9 @@ static int import_select_connection(struct obd_import *imp)
                imp->imp_obd->obd_name, imp, imp_conn->oic_uuid.uuid,
                libcfs_nid2str(imp_conn->oic_conn->c_peer.nid));
 
-        cfs_spin_unlock(&imp->imp_lock);
+       spin_unlock(&imp->imp_lock);
 
-        RETURN(0);
+       RETURN(0);
 }
 
 /*
@@ -607,20 +607,20 @@ int ptlrpc_connect_import(struct obd_import *imp)
         int rc;
         ENTRY;
 
-        cfs_spin_lock(&imp->imp_lock);
-        if (imp->imp_state == LUSTRE_IMP_CLOSED) {
-                cfs_spin_unlock(&imp->imp_lock);
-                CERROR("can't connect to a closed import\n");
-                RETURN(-EINVAL);
-        } else if (imp->imp_state == LUSTRE_IMP_FULL) {
-                cfs_spin_unlock(&imp->imp_lock);
-                CERROR("already connected\n");
-                RETURN(0);
-        } else if (imp->imp_state == LUSTRE_IMP_CONNECTING) {
-                cfs_spin_unlock(&imp->imp_lock);
-                CERROR("already connecting\n");
-                RETURN(-EALREADY);
-        }
+       spin_lock(&imp->imp_lock);
+       if (imp->imp_state == LUSTRE_IMP_CLOSED) {
+               spin_unlock(&imp->imp_lock);
+               CERROR("can't connect to a closed import\n");
+               RETURN(-EINVAL);
+       } else if (imp->imp_state == LUSTRE_IMP_FULL) {
+               spin_unlock(&imp->imp_lock);
+               CERROR("already connected\n");
+               RETURN(0);
+       } else if (imp->imp_state == LUSTRE_IMP_CONNECTING) {
+               spin_unlock(&imp->imp_lock);
+               CERROR("already connecting\n");
+               RETURN(-EALREADY);
+       }
 
         IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CONNECTING);
 
@@ -634,7 +634,7 @@ int ptlrpc_connect_import(struct obd_import *imp)
 
         set_transno = ptlrpc_first_transno(imp,
                                            &imp->imp_connect_data.ocd_transno);
-        cfs_spin_unlock(&imp->imp_lock);
+       spin_unlock(&imp->imp_lock);
 
         rc = import_select_connection(imp);
         if (rc)
@@ -702,9 +702,9 @@ int ptlrpc_connect_import(struct obd_import *imp)
         aa->pcaa_initial_connect = initial_connect;
 
         if (aa->pcaa_initial_connect) {
-                cfs_spin_lock(&imp->imp_lock);
-                imp->imp_replayable = 1;
-                cfs_spin_unlock(&imp->imp_lock);
+               spin_lock(&imp->imp_lock);
+               imp->imp_replayable = 1;
+               spin_unlock(&imp->imp_lock);
                 lustre_msg_add_op_flags(request->rq_reqmsg,
                                         MSG_CONNECT_INITIAL);
         }
@@ -729,17 +729,17 @@ EXPORT_SYMBOL(ptlrpc_connect_import);
 static void ptlrpc_maybe_ping_import_soon(struct obd_import *imp)
 {
 #ifdef __KERNEL__
-        int force_verify;
+       int force_verify;
 
-        cfs_spin_lock(&imp->imp_lock);
-        force_verify = imp->imp_force_verify != 0;
-        cfs_spin_unlock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
+       force_verify = imp->imp_force_verify != 0;
+       spin_unlock(&imp->imp_lock);
 
-        if (force_verify)
-                ptlrpc_pinger_wake_up();
+       if (force_verify)
+               ptlrpc_pinger_wake_up();
 #else
         /* liblustre has no pinger thread, so we wakeup pinger anyway */
-        ptlrpc_pinger_wake_up();
+       ptlrpc_pinger_wake_up();
 #endif
 }
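
ptlrpc_maybe_ping_import_soon() snapshots imp_force_verify under the lock and acts on the copy afterwards, keeping the wakeup call out of the critical section. Reduced to its shape (demo_* names assumed):

#include <linux/spinlock.h>

struct demo_imp {
        spinlock_t di_lock;
        int        di_force_verify;
};

static void demo_wake_pinger(void)
{
        /* stand-in for ptlrpc_pinger_wake_up() */
}

static void demo_maybe_wake(struct demo_imp *di)
{
        int force;

        spin_lock(&di->di_lock);
        force = di->di_force_verify != 0;       /* copy state under the lock */
        spin_unlock(&di->di_lock);

        if (force)                              /* act on the snapshot outside */
                demo_wake_pinger();
}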
 
@@ -767,23 +767,23 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
        struct obd_connect_data *ocd;
        struct obd_export *exp;
        int ret;
-        ENTRY;
+       ENTRY;
 
-        cfs_spin_lock(&imp->imp_lock);
-        if (imp->imp_state == LUSTRE_IMP_CLOSED) {
-                cfs_spin_unlock(&imp->imp_lock);
-                RETURN(0);
-        }
+       spin_lock(&imp->imp_lock);
+       if (imp->imp_state == LUSTRE_IMP_CLOSED) {
+               spin_unlock(&imp->imp_lock);
+               RETURN(0);
+       }
 
-        if (rc) {
-                /* if this reconnect to busy export - not need select new target
-                 * for connecting*/
-                imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc);
-                cfs_spin_unlock(&imp->imp_lock);
-                ptlrpc_maybe_ping_import_soon(imp);
-                GOTO(out, rc);
-        }
-       cfs_spin_unlock(&imp->imp_lock);
+       if (rc) {
+               /* if this reconnect to busy export - not need select new target
+                * for connecting*/
+               imp->imp_force_reconnect = ptlrpc_busy_reconnect(rc);
+               spin_unlock(&imp->imp_lock);
+               ptlrpc_maybe_ping_import_soon(imp);
+               GOTO(out, rc);
+       }
+       spin_unlock(&imp->imp_lock);
 
         LASSERT(imp->imp_conn_current);
 
@@ -802,7 +802,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
                GOTO(out, rc);
        }
 
-       cfs_spin_lock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
 
         /* All imports are pingable */
         imp->imp_pingable = 1;
@@ -815,7 +815,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
               imp->imp_obd->obd_name, ocd->ocd_instance);
        exp = class_conn2export(&imp->imp_dlm_handle);
 
-       cfs_spin_unlock(&imp->imp_lock);
+       spin_unlock(&imp->imp_lock);
 
        /* check that server granted subset of flags we asked for. */
        if ((ocd->ocd_connect_flags & imp->imp_connect_flags_orig) !=
@@ -842,17 +842,17 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
 
        obd_import_event(imp->imp_obd, imp, IMP_EVENT_OCD);
 
-        if (aa->pcaa_initial_connect) {
-               cfs_spin_lock(&imp->imp_lock);
-                if (msg_flags & MSG_CONNECT_REPLAYABLE) {
-                        imp->imp_replayable = 1;
-                        cfs_spin_unlock(&imp->imp_lock);
-                        CDEBUG(D_HA, "connected to replayable target: %s\n",
-                               obd2cli_tgt(imp->imp_obd));
-                } else {
-                        imp->imp_replayable = 0;
-                        cfs_spin_unlock(&imp->imp_lock);
-                }
+       if (aa->pcaa_initial_connect) {
+               spin_lock(&imp->imp_lock);
+               if (msg_flags & MSG_CONNECT_REPLAYABLE) {
+                       imp->imp_replayable = 1;
+                       spin_unlock(&imp->imp_lock);
+                       CDEBUG(D_HA, "connected to replayable target: %s\n",
+                              obd2cli_tgt(imp->imp_obd));
+               } else {
+                       imp->imp_replayable = 0;
+                       spin_unlock(&imp->imp_lock);
+               }
 
                 /* if applies, adjust the imp->imp_msg_magic here
                  * according to reply flags */
@@ -946,9 +946,9 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
                                imp->imp_obd->obd_name,
                                obd2cli_tgt(imp->imp_obd));
 
-                        cfs_spin_lock(&imp->imp_lock);
-                        imp->imp_resend_replay = 1;
-                        cfs_spin_unlock(&imp->imp_lock);
+                       spin_lock(&imp->imp_lock);
+                       imp->imp_resend_replay = 1;
+                       spin_unlock(&imp->imp_lock);
 
                         IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY);
                 } else {
@@ -998,14 +998,14 @@ finish:
                 }
         } else {
 
-                cfs_spin_lock(&imp->imp_lock);
-                cfs_list_del(&imp->imp_conn_current->oic_item);
-                cfs_list_add(&imp->imp_conn_current->oic_item,
-                             &imp->imp_conn_list);
-                imp->imp_last_success_conn =
-                        imp->imp_conn_current->oic_last_attempt;
+               spin_lock(&imp->imp_lock);
+               cfs_list_del(&imp->imp_conn_current->oic_item);
+               cfs_list_add(&imp->imp_conn_current->oic_item,
+                            &imp->imp_conn_list);
+               imp->imp_last_success_conn =
+                       imp->imp_conn_current->oic_last_attempt;
 
-                cfs_spin_unlock(&imp->imp_lock);
+               spin_unlock(&imp->imp_lock);
 
                 if (!ocd->ocd_ibits_known &&
                     ocd->ocd_connect_flags & OBD_CONNECT_IBITS)
@@ -1328,9 +1328,9 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
                        obd2cli_tgt(imp->imp_obd),
                        imp->imp_connection->c_remote_uuid.uuid);
                 /* reset vbr_failed flag upon eviction */
-                cfs_spin_lock(&imp->imp_lock);
-                imp->imp_vbr_failed = 0;
-                cfs_spin_unlock(&imp->imp_lock);
+               spin_lock(&imp->imp_lock);
+               imp->imp_vbr_failed = 0;
+               spin_unlock(&imp->imp_lock);
 
 #ifdef __KERNEL__
                 /* bug 17802:  XXX client_disconnect_export vs connect request
@@ -1456,11 +1456,11 @@ int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
 
         }
 
-        cfs_spin_lock(&imp->imp_lock);
-        if (imp->imp_state != LUSTRE_IMP_FULL)
-                GOTO(out, 0);
+       spin_lock(&imp->imp_lock);
+       if (imp->imp_state != LUSTRE_IMP_FULL)
+               GOTO(out, 0);
 
-        cfs_spin_unlock(&imp->imp_lock);
+       spin_unlock(&imp->imp_lock);
 
         req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_DISCONNECT,
                                         LUSTRE_OBD_VERSION, rq_opc);
@@ -1483,30 +1483,30 @@ int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
         }
 
 set_state:
-        cfs_spin_lock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
 out:
-        if (noclose)
-                IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
-        else
-                IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
-        memset(&imp->imp_remote_handle, 0, sizeof(imp->imp_remote_handle));
-        cfs_spin_unlock(&imp->imp_lock);
-
-        RETURN(rc);
+       if (noclose)
+               IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_DISCON);
+       else
+               IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
+       memset(&imp->imp_remote_handle, 0, sizeof(imp->imp_remote_handle));
+       spin_unlock(&imp->imp_lock);
+
+       RETURN(rc);
 }
 EXPORT_SYMBOL(ptlrpc_disconnect_import);
 
 void ptlrpc_cleanup_imp(struct obd_import *imp)
 {
-        ENTRY;
+       ENTRY;
 
-        cfs_spin_lock(&imp->imp_lock);
-        IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
-        imp->imp_generation++;
-        cfs_spin_unlock(&imp->imp_lock);
-        ptlrpc_abort_inflight(imp);
+       spin_lock(&imp->imp_lock);
+       IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
+       imp->imp_generation++;
+       spin_unlock(&imp->imp_lock);
+       ptlrpc_abort_inflight(imp);
 
-        EXIT;
+       EXIT;
 }
 EXPORT_SYMBOL(ptlrpc_cleanup_imp);
 
@@ -1533,7 +1533,7 @@ int at_measured(struct adaptive_timeout *at, unsigned int val)
                    drop to 0, and because 0 could mean an error */
                 return 0;
 
-        cfs_spin_lock(&at->at_lock);
+       spin_lock(&at->at_lock);
 
         if (unlikely(at->at_binstart == 0)) {
                 /* Special case to remove default from history */
@@ -1589,7 +1589,7 @@ int at_measured(struct adaptive_timeout *at, unsigned int val)
         /* if we changed, report the old value */
         old = (at->at_current != old) ? old : 0;
 
-        cfs_spin_unlock(&at->at_lock);
+       spin_unlock(&at->at_lock);
         return old;
 }
 
@@ -1608,7 +1608,7 @@ int import_at_get_index(struct obd_import *imp, int portal)
         }
 
         /* Not found in list, add it under a lock */
-        cfs_spin_lock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
 
         /* Check unused under lock */
         for (; i < IMP_AT_MAX_PORTALS; i++) {
@@ -1624,6 +1624,6 @@ int import_at_get_index(struct obd_import *imp, int portal)
 
         at->iat_portal[i] = portal;
 out:
-        cfs_spin_unlock(&imp->imp_lock);
-        return i;
+       spin_unlock(&imp->imp_lock);
+       return i;
 }
index fff090e..ab0b474 100644
@@ -54,7 +54,7 @@
 #include <libcfs/list.h>
 
 #define LLOG_CLIENT_ENTRY(ctxt, imp) do {                             \
-        cfs_mutex_lock(&ctxt->loc_mutex);                             \
+       mutex_lock(&ctxt->loc_mutex);                             \
         if (ctxt->loc_imp) {                                          \
                 imp = class_import_get(ctxt->loc_imp);                \
         } else {                                                      \
                        "but I'll try again next time.  Not fatal.\n", \
                        ctxt->loc_idx);                                \
                 imp = NULL;                                           \
-                cfs_mutex_unlock(&ctxt->loc_mutex);                   \
+               mutex_unlock(&ctxt->loc_mutex);                   \
                 return (-EINVAL);                                     \
         }                                                             \
-        cfs_mutex_unlock(&ctxt->loc_mutex);                           \
+       mutex_unlock(&ctxt->loc_mutex);                           \
 } while(0)
 
 #define LLOG_CLIENT_EXIT(ctxt, imp) do {                              \
-        cfs_mutex_lock(&ctxt->loc_mutex);                             \
+       mutex_lock(&ctxt->loc_mutex);                             \
         if (ctxt->loc_imp != imp)                                     \
                 CWARN("loc_imp has changed from %p to %p\n",          \
                        ctxt->loc_imp, imp);                           \
         class_import_put(imp);                                        \
-        cfs_mutex_unlock(&ctxt->loc_mutex);                           \
+       mutex_unlock(&ctxt->loc_mutex);                           \
 } while(0)
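
loc_mutex becomes a plain struct mutex here; the macros pin or swap the context's import pointer while holding it. The core of the pattern, under hypothetical names:

#include <linux/mutex.h>

struct demo_ctxt {
        struct mutex     dc_mutex;
        void            *dc_imp;
};

static void demo_ctxt_init(struct demo_ctxt *dc)
{
        mutex_init(&dc->dc_mutex);      /* was cfs_mutex_init() */
}

static void demo_set_imp(struct demo_ctxt *dc, void *imp)
{
        mutex_lock(&dc->dc_mutex);      /* was cfs_mutex_lock() */
        dc->dc_imp = imp;               /* pointer swap is now race-free */
        mutex_unlock(&dc->dc_mutex);    /* was cfs_mutex_unlock() */
}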
 
 /* This is a callback from the llog_* functions.
index 9db6799..fdcc5f6 100644
@@ -155,7 +155,7 @@ int llog_receptor_accept(struct llog_ctxt *ctxt, struct obd_import *imp)
         ENTRY;
 
         LASSERT(ctxt);
-        cfs_mutex_lock(&ctxt->loc_mutex);
+       mutex_lock(&ctxt->loc_mutex);
         if (ctxt->loc_imp != imp) {
                 if (ctxt->loc_imp) {
                         CWARN("changing the import %p - %p\n",
@@ -164,7 +164,7 @@ int llog_receptor_accept(struct llog_ctxt *ctxt, struct obd_import *imp)
                 }
                 ctxt->loc_imp = class_import_get(imp);
         }
-        cfs_mutex_unlock(&ctxt->loc_mutex);
+       mutex_unlock(&ctxt->loc_mutex);
         RETURN(0);
 }
 EXPORT_SYMBOL(llog_receptor_accept);
@@ -188,13 +188,13 @@ int llog_initiator_connect(struct llog_ctxt *ctxt)
         new_imp = ctxt->loc_obd->u.cli.cl_import;
         LASSERTF(ctxt->loc_imp == NULL || ctxt->loc_imp == new_imp,
                  "%p - %p\n", ctxt->loc_imp, new_imp);
-        cfs_mutex_lock(&ctxt->loc_mutex);
+       mutex_lock(&ctxt->loc_mutex);
         if (ctxt->loc_imp != new_imp) {
                 if (ctxt->loc_imp)
                         class_import_put(ctxt->loc_imp);
                 ctxt->loc_imp = class_import_get(new_imp);
         }
-        cfs_mutex_unlock(&ctxt->loc_mutex);
+       mutex_unlock(&ctxt->loc_mutex);
         RETURN(0);
 }
 EXPORT_SYMBOL(llog_initiator_connect);
index 0caad4a..6282c28 100644
@@ -303,14 +303,14 @@ ptlrpc_lprocfs_write_req_history_max(struct file *file, const char *buffer,
         if (val > cfs_num_physpages/(2 * bufpages))
                 return -ERANGE;
 
-       cfs_spin_lock(&svc->srv_lock);
+       spin_lock(&svc->srv_lock);
 
        if (val == 0)
                svc->srv_hist_nrqbds_cpt_max = 0;
        else
                svc->srv_hist_nrqbds_cpt_max = max(1, (val / svc->srv_ncpts));
 
-       cfs_spin_unlock(&svc->srv_lock);
+       spin_unlock(&svc->srv_lock);
 
        return count;
 }
@@ -339,15 +339,15 @@ ptlrpc_lprocfs_wr_threads_min(struct file *file, const char *buffer,
        if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT)
                return -ERANGE;
 
-       cfs_spin_lock(&svc->srv_lock);
+       spin_lock(&svc->srv_lock);
        if (val > svc->srv_nthrs_cpt_limit * svc->srv_ncpts) {
-               cfs_spin_unlock(&svc->srv_lock);
+               spin_unlock(&svc->srv_lock);
                return -ERANGE;
        }
 
        svc->srv_nthrs_cpt_init = val / svc->srv_ncpts;
 
-       cfs_spin_unlock(&svc->srv_lock);
+       spin_unlock(&svc->srv_lock);
 
        return count;
 }
@@ -392,15 +392,15 @@ ptlrpc_lprocfs_wr_threads_max(struct file *file, const char *buffer,
        if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT)
                return -ERANGE;
 
-       cfs_spin_lock(&svc->srv_lock);
+       spin_lock(&svc->srv_lock);
        if (val < svc->srv_nthrs_cpt_init * svc->srv_ncpts) {
-               cfs_spin_unlock(&svc->srv_lock);
+               spin_unlock(&svc->srv_lock);
                return -ERANGE;
        }
 
        svc->srv_nthrs_cpt_limit = val / svc->srv_ncpts;
 
-       cfs_spin_unlock(&svc->srv_lock);
+       spin_unlock(&svc->srv_lock);
 
        return count;
 }
@@ -473,9 +473,9 @@ ptlrpc_lprocfs_svc_req_history_start(struct seq_file *s, loff_t *pos)
        ptlrpc_service_for_each_part(svcpt, i, svc) {
                srhi->srhi_idx = i;
 
-               cfs_spin_lock(&svcpt->scp_lock);
+               spin_lock(&svcpt->scp_lock);
                rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, *pos);
-               cfs_spin_unlock(&svcpt->scp_lock);
+               spin_unlock(&svcpt->scp_lock);
                if (rc == 0) {
                        *pos = srhi->srhi_seq;
                        return srhi;
@@ -510,9 +510,9 @@ ptlrpc_lprocfs_svc_req_history_next(struct seq_file *s,
 
                srhi->srhi_idx = i;
 
-               cfs_spin_lock(&svcpt->scp_lock);
+               spin_lock(&svcpt->scp_lock);
                rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, *pos + 1);
-               cfs_spin_unlock(&svcpt->scp_lock);
+               spin_unlock(&svcpt->scp_lock);
                if (rc == 0)
                        break;
        }
@@ -568,7 +568,7 @@ static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter)
 
        svcpt = svc->srv_parts[srhi->srhi_idx];
 
-       cfs_spin_lock(&svcpt->scp_lock);
+       spin_lock(&svcpt->scp_lock);
 
        rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, srhi->srhi_seq);
 
@@ -594,7 +594,7 @@ static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter)
                        svc->srv_ops.so_req_printer(s, srhi->srhi_req);
         }
 
-       cfs_spin_unlock(&svcpt->scp_lock);
+       spin_unlock(&svcpt->scp_lock);
        return rc;
 }
 
@@ -702,9 +702,9 @@ static int ptlrpc_lprocfs_wr_hp_ratio(struct file *file, const char *buffer,
        if (val < 0)
                return -ERANGE;
 
-       cfs_spin_lock(&svc->srv_lock);
+       spin_lock(&svc->srv_lock);
        svc->srv_hpreq_ratio = val;
-       cfs_spin_unlock(&svc->srv_lock);
+       spin_unlock(&svc->srv_lock);
 
        return count;
 }
@@ -1009,12 +1009,12 @@ int lprocfs_wr_pinger_recov(struct file *file, const char *buffer,
                 return -ERANGE;
 
         LPROCFS_CLIMP_CHECK(obd);
-        cfs_spin_lock(&imp->imp_lock);
-        imp->imp_no_pinger_recover = !val;
-        cfs_spin_unlock(&imp->imp_lock);
-        LPROCFS_CLIMP_EXIT(obd);
+       spin_lock(&imp->imp_lock);
+       imp->imp_no_pinger_recover = !val;
+       spin_unlock(&imp->imp_lock);
+       LPROCFS_CLIMP_EXIT(obd);
 
-        return count;
+       return count;
 
 }
 EXPORT_SYMBOL(lprocfs_wr_pinger_recov);
index 4233689..c215ed0 100644
@@ -672,7 +672,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
                 }
         }
 
-        cfs_spin_lock(&request->rq_lock);
+       spin_lock(&request->rq_lock);
         /* If the MD attach succeeds, there _will_ be a reply_in callback */
         request->rq_receiving_reply = !noreply;
         /* We are responsible for unlinking the reply buffer */
@@ -685,7 +685,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
         request->rq_resend = 0;
         request->rq_restart = 0;
         request->rq_reply_truncate = 0;
-        cfs_spin_unlock(&request->rq_lock);
+       spin_unlock(&request->rq_lock);
 
         if (!noreply) {
                 reply_md.start     = request->rq_repbuf;
@@ -706,10 +706,10 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
                 if (rc != 0) {
                         CERROR("LNetMDAttach failed: %d\n", rc);
                         LASSERT (rc == -ENOMEM);
-                        cfs_spin_lock(&request->rq_lock);
-                        /* ...but the MD attach didn't succeed... */
-                        request->rq_receiving_reply = 0;
-                        cfs_spin_unlock(&request->rq_lock);
+                       spin_lock(&request->rq_lock);
+                       /* ...but the MD attach didn't succeed... */
+                       request->rq_receiving_reply = 0;
+                       spin_unlock(&request->rq_lock);
                         GOTO(cleanup_me, rc = -ENOMEM);
                 }
 
index ea1c641..9ac472d 100644
@@ -261,20 +261,20 @@ EXPORT_SYMBOL(lustre_pack_request);
 
 #if RS_DEBUG
 CFS_LIST_HEAD(ptlrpc_rs_debug_lru);
-cfs_spinlock_t ptlrpc_rs_debug_lock;
+spinlock_t ptlrpc_rs_debug_lock;
 
-#define PTLRPC_RS_DEBUG_LRU_ADD(rs)                                     \
-do {                                                                    \
-        cfs_spin_lock(&ptlrpc_rs_debug_lock);                           \
-        cfs_list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru);  \
-        cfs_spin_unlock(&ptlrpc_rs_debug_lock);                         \
+#define PTLRPC_RS_DEBUG_LRU_ADD(rs)                                    \
+do {                                                                   \
+       spin_lock(&ptlrpc_rs_debug_lock);                               \
+       cfs_list_add_tail(&(rs)->rs_debug_list, &ptlrpc_rs_debug_lru);  \
+       spin_unlock(&ptlrpc_rs_debug_lock);                             \
 } while (0)
 
-#define PTLRPC_RS_DEBUG_LRU_DEL(rs)             \
-do {                                            \
-        cfs_spin_lock(&ptlrpc_rs_debug_lock);   \
-        cfs_list_del(&(rs)->rs_debug_list);     \
-        cfs_spin_unlock(&ptlrpc_rs_debug_lock); \
+#define PTLRPC_RS_DEBUG_LRU_DEL(rs)                                    \
+do {                                                                   \
+       spin_lock(&ptlrpc_rs_debug_lock);                               \
+       cfs_list_del(&(rs)->rs_debug_list);                             \
+       spin_unlock(&ptlrpc_rs_debug_lock);                             \
 } while (0)
 #else
 # define PTLRPC_RS_DEBUG_LRU_ADD(rs) do {} while(0)
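
The RS_DEBUG block shows the standard compile-time tracing switch: when enabled, reply states are chained onto a debug LRU under a global spinlock; when disabled, the macros compile away entirely. Sketch with invented names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_rs {
        struct list_head drs_debug_list;
};

#ifdef DEMO_RS_DEBUG
static LIST_HEAD(demo_rs_debug_lru);
static DEFINE_SPINLOCK(demo_rs_debug_lock);

#define DEMO_RS_LRU_ADD(rs)                                             \
do {                                                                    \
        spin_lock(&demo_rs_debug_lock);                                 \
        list_add_tail(&(rs)->drs_debug_list, &demo_rs_debug_lru);       \
        spin_unlock(&demo_rs_debug_lock);                               \
} while (0)
#else
#define DEMO_RS_LRU_ADD(rs) do { } while (0)
#endif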
@@ -286,14 +286,14 @@ lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt)
 {
        struct ptlrpc_reply_state *rs = NULL;
 
-       cfs_spin_lock(&svcpt->scp_rep_lock);
+       spin_lock(&svcpt->scp_rep_lock);
 
        /* See if we have anything in a pool, and wait if nothing */
        while (cfs_list_empty(&svcpt->scp_rep_idle)) {
                struct l_wait_info      lwi;
                int                     rc;
 
-               cfs_spin_unlock(&svcpt->scp_rep_lock);
+               spin_unlock(&svcpt->scp_rep_lock);
                /* If we cannot get anything for some long time, we better
                 * bail out instead of waiting infinitely */
                lwi = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL);
@@ -301,14 +301,14 @@ lustre_get_emerg_rs(struct ptlrpc_service_part *svcpt)
                                  !cfs_list_empty(&svcpt->scp_rep_idle), &lwi);
                if (rc != 0)
                        goto out;
-               cfs_spin_lock(&svcpt->scp_rep_lock);
+               spin_lock(&svcpt->scp_rep_lock);
        }
 
        rs = cfs_list_entry(svcpt->scp_rep_idle.next,
                            struct ptlrpc_reply_state, rs_list);
        cfs_list_del(&rs->rs_list);
 
-       cfs_spin_unlock(&svcpt->scp_rep_lock);
+       spin_unlock(&svcpt->scp_rep_lock);
 
        LASSERT(rs != NULL);
        memset(rs, 0, svcpt->scp_service->srv_max_reply_size);
@@ -322,9 +322,9 @@ void lustre_put_emerg_rs(struct ptlrpc_reply_state *rs)
 {
        struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
 
-       cfs_spin_lock(&svcpt->scp_rep_lock);
+       spin_lock(&svcpt->scp_rep_lock);
        cfs_list_add(&rs->rs_list, &svcpt->scp_rep_idle);
-       cfs_spin_unlock(&svcpt->scp_rep_lock);
+       spin_unlock(&svcpt->scp_rep_lock);
        cfs_waitq_signal(&svcpt->scp_rep_waitq);
 }
 
@@ -338,9 +338,9 @@ int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
         LASSERT(req->rq_reply_state == NULL);
 
         if ((flags & LPRFL_EARLY_REPLY) == 0) {
-                cfs_spin_lock(&req->rq_lock);
-                req->rq_packed_final = 1;
-                cfs_spin_unlock(&req->rq_lock);
+               spin_lock(&req->rq_lock);
+               req->rq_packed_final = 1;
+               spin_unlock(&req->rq_lock);
         }
 
         msg_len = lustre_msg_size_v2(count, lens);
@@ -356,7 +356,7 @@ int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
         CFS_INIT_LIST_HEAD(&rs->rs_exp_list);
         CFS_INIT_LIST_HEAD(&rs->rs_obd_list);
         CFS_INIT_LIST_HEAD(&rs->rs_list);
-        cfs_spin_lock_init(&rs->rs_lock);
+       spin_lock_init(&rs->rs_lock);
 
         req->rq_replen = msg_len;
         req->rq_reply_state = rs;
index 3490b9d..ecf3a9b 100644
@@ -48,7 +48,7 @@
 #include <obd_class.h>
 #include "ptlrpc_internal.h"
 
-cfs_mutex_t pinger_mutex;
+struct mutex pinger_mutex;
 static CFS_LIST_HEAD(pinger_imports);
 static cfs_list_t timeout_list = CFS_LIST_HEAD_INIT(timeout_list);
 struct ptlrpc_request *
@@ -147,14 +147,14 @@ cfs_duration_t pinger_check_timeout(cfs_time_t time)
         cfs_time_t timeout = PING_INTERVAL;
 
          /* The timeout list is sorted in increasing order */
-        cfs_mutex_lock(&pinger_mutex);
+       mutex_lock(&pinger_mutex);
         cfs_list_for_each_entry(item, &timeout_list, ti_chain) {
                 int ti_timeout = item->ti_timeout;
                 if (timeout > ti_timeout)
                         timeout = ti_timeout;
                 break;
         }
-        cfs_mutex_unlock(&pinger_mutex);
+       mutex_unlock(&pinger_mutex);
 
         return cfs_time_sub(cfs_time_add(time, cfs_time_seconds(timeout)),
                                          cfs_time_current());
@@ -225,14 +225,14 @@ int ptlrpc_check_and_wait_suspend(struct ptlrpc_request *req)
 static void ptlrpc_pinger_process_import(struct obd_import *imp,
                                          unsigned long this_ping)
 {
-        int force, level;
+       int force, level;
 
-        cfs_spin_lock(&imp->imp_lock);
-        level = imp->imp_state;
-        force = imp->imp_force_verify;
-        if (force)
-                imp->imp_force_verify = 0;
-        cfs_spin_unlock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
+       level = imp->imp_state;
+       force = imp->imp_force_verify;
+       if (force)
+               imp->imp_force_verify = 0;
+       spin_unlock(&imp->imp_lock);
 
         CDEBUG(level == LUSTRE_IMP_FULL ? D_INFO : D_HA,
                "level %s/%u force %u deactive %u pingable %u\n",
@@ -279,7 +279,7 @@ static int ptlrpc_pinger_main(void *arg)
                 struct timeout_item *item;
                 cfs_list_t *iter;
 
-                cfs_mutex_lock(&pinger_mutex);
+               mutex_lock(&pinger_mutex);
                 cfs_list_for_each_entry(item, &timeout_list, ti_chain) {
                         item->ti_cb(item, item->ti_cb_data);
                 }
@@ -296,7 +296,7 @@ static int ptlrpc_pinger_main(void *arg)
                                                         cfs_time_seconds(PING_INTERVAL))))
                                 ptlrpc_update_next_ping(imp, 0);
                 }
-                cfs_mutex_unlock(&pinger_mutex);
+               mutex_unlock(&pinger_mutex);
                 /* update memory usage info */
                 obd_update_maxusage();
 
@@ -390,10 +390,10 @@ int ptlrpc_stop_pinger(void)
                 RETURN(-EALREADY);
 
         ptlrpc_pinger_remove_timeouts();
-        cfs_mutex_lock(&pinger_mutex);
+       mutex_lock(&pinger_mutex);
         thread_set_flags(pinger_thread, SVC_STOPPING);
         cfs_waitq_signal(&pinger_thread->t_ctl_waitq);
-        cfs_mutex_unlock(&pinger_mutex);
+       mutex_unlock(&pinger_mutex);
 
         l_wait_event(pinger_thread->t_ctl_waitq,
                      thread_is_stopped(pinger_thread), &lwi);
@@ -420,7 +420,7 @@ int ptlrpc_pinger_add_import(struct obd_import *imp)
         if (!cfs_list_empty(&imp->imp_pinger_chain))
                 RETURN(-EALREADY);
 
-        cfs_mutex_lock(&pinger_mutex);
+       mutex_lock(&pinger_mutex);
         CDEBUG(D_HA, "adding pingable import %s->%s\n",
                imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
         /* if we add to pinger we want recovery on this import */
@@ -431,7 +431,7 @@ int ptlrpc_pinger_add_import(struct obd_import *imp)
         class_import_get(imp);
 
         ptlrpc_pinger_wake_up();
-        cfs_mutex_unlock(&pinger_mutex);
+       mutex_unlock(&pinger_mutex);
 
         RETURN(0);
 }
@@ -443,14 +443,14 @@ int ptlrpc_pinger_del_import(struct obd_import *imp)
         if (cfs_list_empty(&imp->imp_pinger_chain))
                 RETURN(-ENOENT);
 
-        cfs_mutex_lock(&pinger_mutex);
+       mutex_lock(&pinger_mutex);
         cfs_list_del_init(&imp->imp_pinger_chain);
         CDEBUG(D_HA, "removing pingable import %s->%s\n",
                imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
         /* if we remove from pinger we don't want recovery on this import */
         imp->imp_obd->obd_no_recov = 1;
         class_import_put(imp);
-        cfs_mutex_unlock(&pinger_mutex);
+       mutex_unlock(&pinger_mutex);
         RETURN(0);
 }
 EXPORT_SYMBOL(ptlrpc_pinger_del_import);
@@ -517,14 +517,14 @@ int ptlrpc_add_timeout_client(int time, enum timeout_event event,
 {
         struct timeout_item *ti;
 
-        cfs_mutex_lock(&pinger_mutex);
+       mutex_lock(&pinger_mutex);
         ti = ptlrpc_pinger_register_timeout(time, event, cb, data);
         if (!ti) {
-                cfs_mutex_unlock(&pinger_mutex);
+               mutex_unlock(&pinger_mutex);
                 return (-EINVAL);
         }
         cfs_list_add(obd_list, &ti->ti_obd_list);
-        cfs_mutex_unlock(&pinger_mutex);
+       mutex_unlock(&pinger_mutex);
         return 0;
 }
 EXPORT_SYMBOL(ptlrpc_add_timeout_client);
@@ -536,7 +536,7 @@ int ptlrpc_del_timeout_client(cfs_list_t *obd_list,
 
         if (cfs_list_empty(obd_list))
                 return 0;
-        cfs_mutex_lock(&pinger_mutex);
+       mutex_lock(&pinger_mutex);
         cfs_list_del_init(obd_list);
         /**
          * If there are no obd attached to the timeout event
@@ -553,7 +553,7 @@ int ptlrpc_del_timeout_client(cfs_list_t *obd_list,
                 cfs_list_del(&ti->ti_chain);
                 OBD_FREE_PTR(ti);
         }
-        cfs_mutex_unlock(&pinger_mutex);
+       mutex_unlock(&pinger_mutex);
         return 0;
 }
 EXPORT_SYMBOL(ptlrpc_del_timeout_client);
@@ -562,13 +562,13 @@ int ptlrpc_pinger_remove_timeouts(void)
 {
         struct timeout_item *item, *tmp;
 
-        cfs_mutex_lock(&pinger_mutex);
+       mutex_lock(&pinger_mutex);
         cfs_list_for_each_entry_safe(item, tmp, &timeout_list, ti_chain) {
                 LASSERT(cfs_list_empty(&item->ti_obd_list));
                 cfs_list_del(&item->ti_chain);
                 OBD_FREE_PTR(item);
         }
-        cfs_mutex_unlock(&pinger_mutex);
+       mutex_unlock(&pinger_mutex);
         return 0;
 }
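
The hunks above are representative of the whole series: each cfs_mutex_lock()/cfs_mutex_unlock() pair is replaced one-for-one by the stock <linux/mutex.h> calls, with no change in locking scope. A minimal self-contained sketch of the resulting idiom; pinger_example() is illustrative, not a function in this patch:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(pinger_mutex);      /* statically initialized mutex */

    static void pinger_example(void)
    {
            mutex_lock(&pinger_mutex);      /* may sleep until acquired */
            /* walk timeout_list / pinger_imports here */
            mutex_unlock(&pinger_mutex);
    }
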
 
@@ -592,24 +592,24 @@ static DEFINE_SPINLOCK(pet_lock);
 
 int ping_evictor_wake(struct obd_export *exp)
 {
-        struct obd_device *obd;
+       struct obd_device *obd;
 
-        cfs_spin_lock(&pet_lock);
-        if (pet_state != PET_READY) {
-                /* eventually the new obd will call here again. */
-                cfs_spin_unlock(&pet_lock);
-                return 1;
-        }
+       spin_lock(&pet_lock);
+       if (pet_state != PET_READY) {
+               /* eventually the new obd will call here again. */
+               spin_unlock(&pet_lock);
+               return 1;
+       }
 
-        obd = class_exp2obd(exp);
-        if (cfs_list_empty(&obd->obd_evict_list)) {
-                class_incref(obd, "evictor", obd);
-                cfs_list_add(&obd->obd_evict_list, &pet_list);
-        }
-        cfs_spin_unlock(&pet_lock);
+       obd = class_exp2obd(exp);
+       if (cfs_list_empty(&obd->obd_evict_list)) {
+               class_incref(obd, "evictor", obd);
+               cfs_list_add(&obd->obd_evict_list, &pet_list);
+       }
+       spin_unlock(&pet_lock);
 
-        cfs_waitq_signal(&pet_waitq);
-        return 0;
+       cfs_waitq_signal(&pet_waitq);
+       return 0;
 }
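
ping_evictor_wake() shows the spinlock half of the conversion: cfs_spin_lock()/cfs_spin_unlock() become spin_lock()/spin_unlock() on pet_lock, which the surrounding context already declares with DEFINE_SPINLOCK(). A self-contained sketch of the publish-under-spinlock pattern; the _example names are stand-ins:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(pet_lock_example);
    static LIST_HEAD(pet_list_example);

    static int evictor_wake_example(struct list_head *item)
    {
            spin_lock(&pet_lock_example);   /* no sleeping while held */
            if (list_empty(item))           /* self-linked => not yet queued */
                    list_add(item, &pet_list_example);
            spin_unlock(&pet_lock_example);
            return 0;
    }
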
 
 static int ping_evictor_main(void *arg)
@@ -635,10 +635,10 @@ static int ping_evictor_main(void *arg)
                 /* we only get here if pet_exp != NULL, and the end of this
                  * loop is the only place which sets it NULL again, so lock
                  * is not strictly necessary. */
-                cfs_spin_lock(&pet_lock);
-                obd = cfs_list_entry(pet_list.next, struct obd_device,
-                                     obd_evict_list);
-                cfs_spin_unlock(&pet_lock);
+               spin_lock(&pet_lock);
+               obd = cfs_list_entry(pet_list.next, struct obd_device,
+                                    obd_evict_list);
+               spin_unlock(&pet_lock);
 
                 expire_time = cfs_time_current_sec() - PING_EVICT_TIMEOUT;
 
@@ -649,15 +649,15 @@ static int ping_evictor_main(void *arg)
                  * the obd lock (class_unlink_export), which means we can't
                  * lose the last ref on the export.  If they've already been
                  * removed from the list, we won't find them here. */
-                cfs_spin_lock(&obd->obd_dev_lock);
-                while (!cfs_list_empty(&obd->obd_exports_timed)) {
-                        exp = cfs_list_entry(obd->obd_exports_timed.next,
-                                             struct obd_export,
-                                             exp_obd_chain_timed);
-                        if (expire_time > exp->exp_last_request_time) {
-                                class_export_get(exp);
-                                cfs_spin_unlock(&obd->obd_dev_lock);
-                                 LCONSOLE_WARN("%s: haven't heard from client %s"
+               spin_lock(&obd->obd_dev_lock);
+               while (!cfs_list_empty(&obd->obd_exports_timed)) {
+                       exp = cfs_list_entry(obd->obd_exports_timed.next,
+                                            struct obd_export,
+                                            exp_obd_chain_timed);
+                       if (expire_time > exp->exp_last_request_time) {
+                               class_export_get(exp);
+                               spin_unlock(&obd->obd_dev_lock);
+                               LCONSOLE_WARN("%s: haven't heard from client %s"
                                               " (at %s) in %ld seconds. I think"
                                               " it's dead, and I am evicting"
                                               " it. exp %p, cur %ld expire %ld"
@@ -674,17 +674,17 @@ static int ping_evictor_main(void *arg)
                                        exp->exp_last_request_time);
                                 class_fail_export(exp);
                                 class_export_put(exp);
-                                cfs_spin_lock(&obd->obd_dev_lock);
-                        } else {
-                                /* List is sorted, so everyone below is ok */
-                                break;
-                        }
-                }
-                cfs_spin_unlock(&obd->obd_dev_lock);
-
-                cfs_spin_lock(&pet_lock);
-                cfs_list_del_init(&obd->obd_evict_list);
-                cfs_spin_unlock(&pet_lock);
+                               spin_lock(&obd->obd_dev_lock);
+                       } else {
+                               /* List is sorted, so everyone below is ok */
+                               break;
+                       }
+               }
+               spin_unlock(&obd->obd_dev_lock);
+
+               spin_lock(&pet_lock);
+               cfs_list_del_init(&obd->obd_evict_list);
+               spin_unlock(&pet_lock);
 
                 class_decref(obd, "evictor", obd);
         }
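
Note the shape of the eviction loop above: the export is pinned with class_export_get(), obd_dev_lock is dropped before the console message and class_fail_export() (work that can block), and the lock is re-taken before the next list check. A sketch of that drop-and-retake idiom; for self-containment the sketch unlinks each entry itself, whereas the real loop relies on the eviction path to take the export off obd_exports_timed:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct timed_example {
            struct list_head chain;
    };

    static void expire_example(spinlock_t *lock, struct list_head *head)
    {
            struct timed_example *t;

            spin_lock(lock);
            while (!list_empty(head)) {
                    t = list_first_entry(head, struct timed_example, chain);
                    list_del_init(&t->chain);  /* unlink before dropping the lock */
                    spin_unlock(lock);         /* blocking work is legal here */
                    /* ... expensive per-entry work ... */
                    spin_lock(lock);
            }
            spin_unlock(lock);
    }
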
@@ -771,7 +771,7 @@ static int pinger_check_rpcs(void *arg)
         set = pd->pd_set;
 
         /* add rpcs into set */
-        cfs_mutex_lock(&pinger_mutex);
+       mutex_lock(&pinger_mutex);
         cfs_list_for_each(iter, &pinger_imports) {
                 struct obd_import *imp = cfs_list_entry(iter, struct obd_import,
                                                         imp_pinger_chain);
@@ -780,10 +780,10 @@ static int pinger_check_rpcs(void *arg)
                 if (cfs_time_aftereq(pd->pd_this_ping,
                                      imp->imp_next_ping - 5 * CFS_TICK)) {
                         /* Add a ping. */
-                        cfs_spin_lock(&imp->imp_lock);
-                        generation = imp->imp_generation;
-                        level = imp->imp_state;
-                        cfs_spin_unlock(&imp->imp_lock);
+                       spin_lock(&imp->imp_lock);
+                       generation = imp->imp_generation;
+                       level = imp->imp_state;
+                       spin_unlock(&imp->imp_lock);
 
                         if (level != LUSTRE_IMP_FULL) {
                                 CDEBUG(D_HA,
@@ -815,7 +815,7 @@ static int pinger_check_rpcs(void *arg)
                 }
         }
         pd->pd_this_ping = curtime;
-        cfs_mutex_unlock(&pinger_mutex);
+       mutex_unlock(&pinger_mutex);
 
         /* Might be empty, that's OK. */
         if (cfs_atomic_read(&set->set_remaining) == 0)
@@ -843,7 +843,7 @@ do_check_set:
         }
 
         /* Expire all the requests that didn't come back. */
-        cfs_mutex_lock(&pinger_mutex);
+       mutex_lock(&pinger_mutex);
         cfs_list_for_each(iter, &set->set_requests) {
                 req = cfs_list_entry(iter, struct ptlrpc_request,
                                      rq_set_chain);
@@ -861,15 +861,15 @@ do_check_set:
                  * phase and take care of inflights. */
                 ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
                 imp = req->rq_import;
-                cfs_spin_lock(&imp->imp_lock);
-                if (!cfs_list_empty(&req->rq_list)) {
-                        cfs_list_del_init(&req->rq_list);
-                        cfs_atomic_dec(&imp->imp_inflight);
-                }
-                cfs_spin_unlock(&imp->imp_lock);
-                cfs_atomic_dec(&set->set_remaining);
-        }
-        cfs_mutex_unlock(&pinger_mutex);
+               spin_lock(&imp->imp_lock);
+               if (!cfs_list_empty(&req->rq_list)) {
+                       cfs_list_del_init(&req->rq_list);
+                       cfs_atomic_dec(&imp->imp_inflight);
+               }
+               spin_unlock(&imp->imp_lock);
+               cfs_atomic_dec(&set->set_remaining);
+       }
+       mutex_unlock(&pinger_mutex);
 
         ptlrpc_set_destroy(set);
         pd->pd_set = NULL;
@@ -910,7 +910,7 @@ int ptlrpc_stop_pinger(void)
 void ptlrpc_pinger_sending_on_import(struct obd_import *imp)
 {
 #ifdef ENABLE_PINGER
-        cfs_mutex_lock(&pinger_mutex);
+       mutex_lock(&pinger_mutex);
         ptlrpc_update_next_ping(imp, 0);
         if (pinger_args.pd_set == NULL &&
             cfs_time_before(imp->imp_next_ping, pinger_args.pd_next_ping)) {
@@ -918,14 +918,14 @@ void ptlrpc_pinger_sending_on_import(struct obd_import *imp)
                         imp->imp_next_ping, cfs_time_current());
                 pinger_args.pd_next_ping = imp->imp_next_ping;
         }
-        cfs_mutex_unlock(&pinger_mutex);
+       mutex_unlock(&pinger_mutex);
 #endif
 }
 
 void ptlrpc_pinger_commit_expected(struct obd_import *imp)
 {
 #ifdef ENABLE_PINGER
-        cfs_mutex_lock(&pinger_mutex);
+       mutex_lock(&pinger_mutex);
         ptlrpc_update_next_ping(imp, 1);
         if (pinger_args.pd_set == NULL &&
             cfs_time_before(imp->imp_next_ping, pinger_args.pd_next_ping)) {
@@ -933,7 +933,7 @@ void ptlrpc_pinger_commit_expected(struct obd_import *imp)
                         imp->imp_next_ping, cfs_time_current());
                 pinger_args.pd_next_ping = imp->imp_next_ping;
         }
-        cfs_mutex_unlock(&pinger_mutex);
+       mutex_unlock(&pinger_mutex);
 #endif
 }
 
@@ -960,10 +960,10 @@ int ptlrpc_pinger_add_import(struct obd_import *imp)
                imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
         ptlrpc_pinger_sending_on_import(imp);
 
-        cfs_mutex_lock(&pinger_mutex);
+       mutex_lock(&pinger_mutex);
         cfs_list_add_tail(&imp->imp_pinger_chain, &pinger_imports);
         class_import_get(imp);
-        cfs_mutex_unlock(&pinger_mutex);
+       mutex_unlock(&pinger_mutex);
 
         RETURN(0);
 }
@@ -974,12 +974,12 @@ int ptlrpc_pinger_del_import(struct obd_import *imp)
         if (cfs_list_empty(&imp->imp_pinger_chain))
                 RETURN(-ENOENT);
 
-        cfs_mutex_lock(&pinger_mutex);
+       mutex_lock(&pinger_mutex);
         cfs_list_del_init(&imp->imp_pinger_chain);
         CDEBUG(D_HA, "removing pingable import %s->%s\n",
                imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
         class_import_put(imp);
-        cfs_mutex_unlock(&pinger_mutex);
+       mutex_unlock(&pinger_mutex);
         RETURN(0);
 }
 
index 5f05852..cac53df 100644 (file)
 
 #include "ptlrpc_internal.h"
 
-extern cfs_spinlock_t ptlrpc_last_xid_lock;
+extern spinlock_t ptlrpc_last_xid_lock;
 #if RS_DEBUG
-extern cfs_spinlock_t ptlrpc_rs_debug_lock;
+extern spinlock_t ptlrpc_rs_debug_lock;
 #endif
-extern cfs_spinlock_t ptlrpc_all_services_lock;
-extern cfs_mutex_t pinger_mutex;
-extern cfs_mutex_t ptlrpcd_mutex;
+extern spinlock_t ptlrpc_all_services_lock;
+extern struct mutex pinger_mutex;
+extern struct mutex ptlrpcd_mutex;
 
 __init int ptlrpc_init(void)
 {
@@ -62,11 +62,11 @@ __init int ptlrpc_init(void)
 
         lustre_assert_wire_constants();
 #if RS_DEBUG
-        cfs_spin_lock_init(&ptlrpc_rs_debug_lock);
+       spin_lock_init(&ptlrpc_rs_debug_lock);
 #endif
-        cfs_spin_lock_init(&ptlrpc_all_services_lock);
-        cfs_mutex_init(&pinger_mutex);
-        cfs_mutex_init(&ptlrpcd_mutex);
+       spin_lock_init(&ptlrpc_all_services_lock);
+       mutex_init(&pinger_mutex);
+       mutex_init(&ptlrpcd_mutex);
         ptlrpc_init_xid();
 
         rc = req_layout_init();
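
The ptlrpc_init() hunk shows the declaration side of the conversion: cfs_spinlock_t becomes spinlock_t, cfs_mutex_t becomes struct mutex, and run-time setup goes through spin_lock_init()/mutex_init() for locks that are shared across files via extern and so cannot use the static initializers. A sketch of the declare-then-init pattern, with illustrative names:

    #include <linux/mutex.h>
    #include <linux/spinlock.h>

    spinlock_t services_lock_example;       /* referenced elsewhere via extern */
    struct mutex pinger_mutex_example;

    int init_example(void)
    {
            spin_lock_init(&services_lock_example);
            mutex_init(&pinger_mutex_example);
            return 0;
    }
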
index 861e3cd..5db8f92 100644 (file)
@@ -92,7 +92,7 @@ CFS_MODULE_PARM(ptlrpcd_bind_policy, "i", int, 0644,
 #endif
 static struct ptlrpcd *ptlrpcds;
 
-cfs_mutex_t ptlrpcd_mutex;
+struct mutex ptlrpcd_mutex;
 static int ptlrpcd_users = 0;
 
 void ptlrpcd_wake(struct ptlrpc_request *req)
@@ -183,12 +183,12 @@ void ptlrpcd_add_rqset(struct ptlrpc_request_set *set)
         }
 
 #ifdef __KERNEL__
-        cfs_spin_lock(&new->set_new_req_lock);
-        cfs_list_splice_init(&set->set_requests, &new->set_new_requests);
-        i = cfs_atomic_read(&set->set_remaining);
-        count = cfs_atomic_add_return(i, &new->set_new_count);
-        cfs_atomic_set(&set->set_remaining, 0);
-        cfs_spin_unlock(&new->set_new_req_lock);
+       spin_lock(&new->set_new_req_lock);
+       cfs_list_splice_init(&set->set_requests, &new->set_new_requests);
+       i = cfs_atomic_read(&set->set_remaining);
+       count = cfs_atomic_add_return(i, &new->set_new_count);
+       cfs_atomic_set(&set->set_remaining, 0);
+       spin_unlock(&new->set_new_req_lock);
         if (count == i) {
                 cfs_waitq_signal(&new->set_waitq);
 
@@ -213,7 +213,7 @@ static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
         struct ptlrpc_request *req;
         int rc = 0;
 
-        cfs_spin_lock(&src->set_new_req_lock);
+       spin_lock(&src->set_new_req_lock);
         if (likely(!cfs_list_empty(&src->set_new_requests))) {
                 cfs_list_for_each_safe(pos, tmp, &src->set_new_requests) {
                         req = cfs_list_entry(pos, struct ptlrpc_request,
@@ -226,8 +226,8 @@ static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des,
                 cfs_atomic_add(rc, &des->set_remaining);
                 cfs_atomic_set(&src->set_new_count, 0);
         }
-        cfs_spin_unlock(&src->set_new_req_lock);
-        return rc;
+       spin_unlock(&src->set_new_req_lock);
+       return rc;
 }
 #endif
 
@@ -242,13 +242,13 @@ void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx)
        if (req->rq_reqmsg)
                lustre_msg_set_jobid(req->rq_reqmsg, NULL);
 
-        cfs_spin_lock(&req->rq_lock);
+       spin_lock(&req->rq_lock);
         if (req->rq_invalid_rqset) {
                 struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(5),
                                                      back_to_sleep, NULL);
 
                 req->rq_invalid_rqset = 0;
-                cfs_spin_unlock(&req->rq_lock);
+               spin_unlock(&req->rq_lock);
                 l_wait_event(req->rq_set_waitq, (req->rq_set == NULL), &lwi);
         } else if (req->rq_set) {
                /* If we have a valid "rq_set", just reuse it to avoid double
@@ -258,11 +258,11 @@ void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx)
 
                 /* ptlrpc_check_set will decrease the count */
                 cfs_atomic_inc(&req->rq_set->set_remaining);
-                cfs_spin_unlock(&req->rq_lock);
-                cfs_waitq_signal(&req->rq_set->set_waitq);
-                return;
-        } else {
-                cfs_spin_unlock(&req->rq_lock);
+               spin_unlock(&req->rq_lock);
+               cfs_waitq_signal(&req->rq_set->set_waitq);
+               return;
+       } else {
+               spin_unlock(&req->rq_lock);
         }
 
         pc = ptlrpcd_select_pc(req, policy, idx);
@@ -293,7 +293,7 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
         ENTRY;
 
         if (cfs_atomic_read(&set->set_new_count)) {
-                cfs_spin_lock(&set->set_new_req_lock);
+               spin_lock(&set->set_new_req_lock);
                 if (likely(!cfs_list_empty(&set->set_new_requests))) {
                         cfs_list_splice_init(&set->set_new_requests,
                                              &set->set_requests);
@@ -305,7 +305,7 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
                          */
                         rc = 1;
                 }
-                cfs_spin_unlock(&set->set_new_req_lock);
+               spin_unlock(&set->set_new_req_lock);
         }
 
         /* We should call lu_env_refill() before handling new requests to make
@@ -368,15 +368,15 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
                                 if (partner == NULL)
                                         continue;
 
-                                cfs_spin_lock(&partner->pc_lock);
-                                ps = partner->pc_set;
-                                if (ps == NULL) {
-                                        cfs_spin_unlock(&partner->pc_lock);
-                                        continue;
-                                }
+                               spin_lock(&partner->pc_lock);
+                               ps = partner->pc_set;
+                               if (ps == NULL) {
+                                       spin_unlock(&partner->pc_lock);
+                                       continue;
+                               }
 
-                                ptlrpc_reqset_get(ps);
-                                cfs_spin_unlock(&partner->pc_lock);
+                               ptlrpc_reqset_get(ps);
+                               spin_unlock(&partner->pc_lock);
 
                                 if (cfs_atomic_read(&ps->set_new_count)) {
                                         rc = ptlrpcd_steal_rqset(set, ps);
@@ -412,7 +412,7 @@ static int ptlrpcd(void *arg)
 
         cfs_daemonize_ctxt(pc->pc_name);
 #if defined(CONFIG_SMP) && defined(HAVE_NODE_TO_CPUMASK)
-        if (cfs_test_bit(LIOD_BIND, &pc->pc_flags)) {
+       if (test_bit(LIOD_BIND, &pc->pc_flags)) {
                 int index = pc->pc_index;
 
                 if (index >= 0 && index < cfs_num_possible_cpus()) {
@@ -432,7 +432,7 @@ static int ptlrpcd(void *arg)
          */
         rc = lu_context_init(&env.le_ctx,
                              LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
-        cfs_complete(&pc->pc_starting);
+       complete(&pc->pc_starting);
 
         if (rc != 0)
                 RETURN(rc);
@@ -459,8 +459,8 @@ static int ptlrpcd(void *arg)
                 /*
                  * Abort inflight rpcs for forced stop case.
                  */
-                if (cfs_test_bit(LIOD_STOP, &pc->pc_flags)) {
-                        if (cfs_test_bit(LIOD_FORCE, &pc->pc_flags))
+               if (test_bit(LIOD_STOP, &pc->pc_flags)) {
+                       if (test_bit(LIOD_FORCE, &pc->pc_flags))
                                 ptlrpc_abort_set(set);
                         exit++;
                 }
@@ -478,12 +478,12 @@ static int ptlrpcd(void *arg)
                 ptlrpc_set_wait(set);
         lu_context_fini(&env.le_ctx);
 
-        cfs_clear_bit(LIOD_START, &pc->pc_flags);
-        cfs_clear_bit(LIOD_STOP, &pc->pc_flags);
-        cfs_clear_bit(LIOD_FORCE, &pc->pc_flags);
-        cfs_clear_bit(LIOD_BIND, &pc->pc_flags);
+       clear_bit(LIOD_START, &pc->pc_flags);
+       clear_bit(LIOD_STOP, &pc->pc_flags);
+       clear_bit(LIOD_FORCE, &pc->pc_flags);
+       clear_bit(LIOD_BIND, &pc->pc_flags);
 
-        cfs_complete(&pc->pc_finishing);
+       complete(&pc->pc_finishing);
 
         return 0;
 }
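
ptlrpcd() signals its lifecycle with the kernel's completion primitives: complete(&pc->pc_starting) unblocks the wait_for_completion() in ptlrpcd_start() further down, and complete(&pc->pc_finishing) unblocks ptlrpcd_stop(). A sketch of that handshake under illustrative names:

    #include <linux/completion.h>

    struct thread_ctl_example {
            struct completion starting;
            struct completion finishing;
    };

    static int thread_body_example(void *arg)
    {
            struct thread_ctl_example *ctl = arg;

            complete(&ctl->starting);       /* starter returns from wait_for_completion() */
            /* ... request-processing loop ... */
            complete(&ctl->finishing);      /* stopper can now tear down */
            return 0;
    }

The starter initializes both completions with init_completion() before spawning the thread, which is exactly the order visible in the ptlrpcd_start() hunk below.
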
@@ -543,7 +543,7 @@ static int ptlrpcd_bind(int index, int max)
                 break;
         case PDB_POLICY_FULL:
                 pc->pc_npartners = 0;
-                cfs_set_bit(LIOD_BIND, &pc->pc_flags);
+               set_bit(LIOD_BIND, &pc->pc_flags);
                 break;
         case PDB_POLICY_PAIR:
                 LASSERT(max % 2 == 0);
@@ -556,7 +556,7 @@ static int ptlrpcd_bind(int index, int max)
                 for (i = max; i < cfs_num_online_cpus(); i++)
                         cpu_clear(i, mask);
                 pc->pc_npartners = cpus_weight(mask) - 1;
-                cfs_set_bit(LIOD_BIND, &pc->pc_flags);
+               set_bit(LIOD_BIND, &pc->pc_flags);
 #else
                 LASSERT(max >= 3);
                 pc->pc_npartners = 2;
@@ -577,7 +577,7 @@ static int ptlrpcd_bind(int index, int max)
                         switch (ptlrpcd_bind_policy) {
                         case PDB_POLICY_PAIR:
                                 if (index & 0x1) {
-                                        cfs_set_bit(LIOD_BIND, &pc->pc_flags);
+                                       set_bit(LIOD_BIND, &pc->pc_flags);
                                         pc->pc_partners[0] = &ptlrpcds->
                                                 pd_threads[index - 1];
                                         ptlrpcds->pd_threads[index - 1].
@@ -603,7 +603,7 @@ static int ptlrpcd_bind(int index, int max)
                                 pc->pc_npartners = pidx;
 #else
                                 if (index & 0x1)
-                                        cfs_set_bit(LIOD_BIND, &pc->pc_flags);
+                                       set_bit(LIOD_BIND, &pc->pc_flags);
                                 if (index > 0) {
                                         pc->pc_partners[0] = &ptlrpcds->
                                                 pd_threads[index - 1];
@@ -652,7 +652,7 @@ int ptlrpcd_check_async_rpcs(void *arg)
                         /*
                          * XXX: send replay requests.
                          */
-                        if (cfs_test_bit(LIOD_RECOVERY, &pc->pc_flags))
+                       if (test_bit(LIOD_RECOVERY, &pc->pc_flags))
                                 rc = ptlrpcd_check(&pc->pc_env, pc);
                         lu_context_exit(&pc->pc_env.le_ctx);
                 }
@@ -681,16 +681,16 @@ int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
         /*
         * Do not allow starting a second thread for the same pc.
          */
-        if (cfs_test_and_set_bit(LIOD_START, &pc->pc_flags)) {
-                CWARN("Starting second thread (%s) for same pc %p\n",
-                       name, pc);
-                RETURN(0);
-        }
-
-        pc->pc_index = index;
-        cfs_init_completion(&pc->pc_starting);
-        cfs_init_completion(&pc->pc_finishing);
-        cfs_spin_lock_init(&pc->pc_lock);
+       if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
+               CWARN("Starting second thread (%s) for same pc %p\n",
+                     name, pc);
+               RETURN(0);
+       }
+
+       pc->pc_index = index;
+       init_completion(&pc->pc_starting);
+       init_completion(&pc->pc_finishing);
+       spin_lock_init(&pc->pc_lock);
         strncpy(pc->pc_name, name, sizeof(pc->pc_name) - 1);
         pc->pc_set = ptlrpc_prep_set();
         if (pc->pc_set == NULL)
@@ -717,7 +717,7 @@ int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
                 GOTO(out, rc);
 
         rc = 0;
-        cfs_wait_for_completion(&pc->pc_starting);
+       wait_for_completion(&pc->pc_starting);
 #else
         pc->pc_wait_callback =
                 liblustre_register_wait_callback("ptlrpcd_check_async_rpcs",
@@ -732,20 +732,20 @@ out:
                 if (pc->pc_set != NULL) {
                         struct ptlrpc_request_set *set = pc->pc_set;
 
-                        cfs_spin_lock(&pc->pc_lock);
-                        pc->pc_set = NULL;
-                        cfs_spin_unlock(&pc->pc_lock);
-                        ptlrpc_set_destroy(set);
-                }
-                if (env != 0)
-                        lu_context_fini(&pc->pc_env.le_ctx);
-                cfs_clear_bit(LIOD_BIND, &pc->pc_flags);
+                       spin_lock(&pc->pc_lock);
+                       pc->pc_set = NULL;
+                       spin_unlock(&pc->pc_lock);
+                       ptlrpc_set_destroy(set);
+               }
+               if (env != 0)
+                       lu_context_fini(&pc->pc_env.le_ctx);
+               clear_bit(LIOD_BIND, &pc->pc_flags);
 #else
-                SET_BUT_UNUSED(env);
+               SET_BUT_UNUSED(env);
 #endif
-                cfs_clear_bit(LIOD_START, &pc->pc_flags);
-        }
-        RETURN(rc);
+               clear_bit(LIOD_START, &pc->pc_flags);
+       }
+       RETURN(rc);
 }
 
 void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
@@ -753,27 +753,27 @@ void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
        struct ptlrpc_request_set *set = pc->pc_set;
         ENTRY;
 
-        if (!cfs_test_bit(LIOD_START, &pc->pc_flags)) {
+       if (!test_bit(LIOD_START, &pc->pc_flags)) {
                 CWARN("Thread for pc %p was not started\n", pc);
                 goto out;
         }
 
-        cfs_set_bit(LIOD_STOP, &pc->pc_flags);
-        if (force)
-                cfs_set_bit(LIOD_FORCE, &pc->pc_flags);
-        cfs_waitq_signal(&pc->pc_set->set_waitq);
+       set_bit(LIOD_STOP, &pc->pc_flags);
+       if (force)
+               set_bit(LIOD_FORCE, &pc->pc_flags);
+       cfs_waitq_signal(&pc->pc_set->set_waitq);
 #ifdef __KERNEL__
-        cfs_wait_for_completion(&pc->pc_finishing);
+       wait_for_completion(&pc->pc_finishing);
 #else
-        liblustre_deregister_wait_callback(pc->pc_wait_callback);
-        liblustre_deregister_idle_callback(pc->pc_idle_callback);
+       liblustre_deregister_wait_callback(pc->pc_wait_callback);
+       liblustre_deregister_idle_callback(pc->pc_idle_callback);
 #endif
-        lu_context_fini(&pc->pc_env.le_ctx);
+       lu_context_fini(&pc->pc_env.le_ctx);
 
-        cfs_spin_lock(&pc->pc_lock);
-        pc->pc_set = NULL;
-        cfs_spin_unlock(&pc->pc_lock);
-        ptlrpc_set_destroy(set);
+       spin_lock(&pc->pc_lock);
+       pc->pc_set = NULL;
+       spin_unlock(&pc->pc_lock);
+       ptlrpc_set_destroy(set);
 
 out:
 #ifdef __KERNEL__
@@ -831,7 +831,7 @@ static int ptlrpcd_init(void)
                 GOTO(out, rc = -ENOMEM);
 
         snprintf(name, 15, "ptlrpcd_rcv");
-        cfs_set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags);
+       set_bit(LIOD_RECOVERY, &ptlrpcds->pd_thread_rcv.pc_flags);
         rc = ptlrpcd_start(-1, nthreads, name, &ptlrpcds->pd_thread_rcv);
         if (rc < 0)
                 GOTO(out, rc);
@@ -876,20 +876,20 @@ int ptlrpcd_addref(void)
         int rc = 0;
         ENTRY;
 
-        cfs_mutex_lock(&ptlrpcd_mutex);
+       mutex_lock(&ptlrpcd_mutex);
         if (++ptlrpcd_users == 1)
                 rc = ptlrpcd_init();
-        cfs_mutex_unlock(&ptlrpcd_mutex);
+       mutex_unlock(&ptlrpcd_mutex);
         RETURN(rc);
 }
 EXPORT_SYMBOL(ptlrpcd_addref);
 
 void ptlrpcd_decref(void)
 {
-        cfs_mutex_lock(&ptlrpcd_mutex);
+       mutex_lock(&ptlrpcd_mutex);
         if (--ptlrpcd_users == 0)
                 ptlrpcd_fini();
-        cfs_mutex_unlock(&ptlrpcd_mutex);
+       mutex_unlock(&ptlrpcd_mutex);
 }
 EXPORT_SYMBOL(ptlrpcd_decref);
 /** @} ptlrpcd */
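
Throughout ptlrpcd the LIOD_* thread flags are manipulated with the generic atomic bit operations, now called without the cfs_ prefix: test_bit(), set_bit(), clear_bit(), and the atomic read-modify-write test_and_set_bit() that guards against double starts. A sketch; the bit numbers here are assumptions for illustration, not Lustre's values:

    #include <linux/bitops.h>

    #define EX_START        0
    #define EX_STOP         1

    static int flags_example(unsigned long *flags)
    {
            if (test_and_set_bit(EX_START, flags))
                    return -1;              /* lost the race: already started */
            set_bit(EX_STOP, flags);
            clear_bit(EX_START, flags);
            return test_bit(EX_STOP, flags) ? 0 : 1;
    }
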
index 710afd7..810aa02 100644 (file)
@@ -112,11 +112,11 @@ static struct llog_canceld_ctxt *llcd_alloc(struct llog_commit_master *lcm)
         llcd->llcd_cookiebytes = 0;
         llcd->llcd_size = size;
 
-        cfs_spin_lock(&lcm->lcm_lock);
-        llcd->llcd_lcm = lcm;
-        cfs_atomic_inc(&lcm->lcm_count);
-        cfs_list_add_tail(&llcd->llcd_list, &lcm->lcm_llcds);
-        cfs_spin_unlock(&lcm->lcm_lock);
+       spin_lock(&lcm->lcm_lock);
+       llcd->llcd_lcm = lcm;
+       cfs_atomic_inc(&lcm->lcm_count);
+       cfs_list_add_tail(&llcd->llcd_list, &lcm->lcm_llcds);
+       spin_unlock(&lcm->lcm_lock);
         cfs_atomic_inc(&llcd_count);
 
         CDEBUG(D_RPCTRACE, "Alloc llcd %p on lcm %p (%d)\n",
@@ -139,11 +139,11 @@ static void llcd_free(struct llog_canceld_ctxt *llcd)
                         llcd_print(llcd, __FUNCTION__, __LINE__);
                         LBUG();
                 }
-                cfs_spin_lock(&lcm->lcm_lock);
-                LASSERT(!cfs_list_empty(&llcd->llcd_list));
-                cfs_list_del_init(&llcd->llcd_list);
-                cfs_atomic_dec(&lcm->lcm_count);
-                cfs_spin_unlock(&lcm->lcm_lock);
+               spin_lock(&lcm->lcm_lock);
+               LASSERT(!cfs_list_empty(&llcd->llcd_list));
+               cfs_list_del_init(&llcd->llcd_list);
+               cfs_atomic_dec(&lcm->lcm_count);
+               spin_unlock(&lcm->lcm_lock);
 
                 CDEBUG(D_RPCTRACE, "Free llcd %p on lcm %p (%d)\n",
                        llcd, lcm, cfs_atomic_read(&lcm->lcm_count));
@@ -230,7 +230,7 @@ static int llcd_send(struct llog_canceld_ctxt *llcd)
          * Check if we're in exit stage. Do not send llcd in
          * this case.
          */
-        if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
+       if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
                 GOTO(exit, rc = -ENODEV);
 
         CDEBUG(D_RPCTRACE, "Sending llcd %p\n", llcd);
@@ -418,7 +418,7 @@ void llog_recov_thread_stop(struct llog_commit_master *lcm, int force)
          * Let all know that we're stopping. This will also make
          * llcd_send() refuse any new llcds.
          */
-        cfs_set_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags);
+       set_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags);
 
         /*
          * Stop processing thread. No new rpcs will be accepted for
@@ -438,13 +438,13 @@ void llog_recov_thread_stop(struct llog_commit_master *lcm, int force)
                 CERROR("Busy llcds found (%d) on lcm %p\n",
                        cfs_atomic_read(&lcm->lcm_count), lcm);
 
-                cfs_spin_lock(&lcm->lcm_lock);
-                cfs_list_for_each(tmp, &lcm->lcm_llcds) {
-                        llcd = cfs_list_entry(tmp, struct llog_canceld_ctxt,
-                                              llcd_list);
-                        llcd_print(llcd, __FUNCTION__, __LINE__);
-                }
-                cfs_spin_unlock(&lcm->lcm_lock);
+               spin_lock(&lcm->lcm_lock);
+               cfs_list_for_each(tmp, &lcm->lcm_llcds) {
+                       llcd = cfs_list_entry(tmp, struct llog_canceld_ctxt,
+                                             llcd_list);
+                       llcd_print(llcd, __func__, __LINE__);
+               }
+               spin_unlock(&lcm->lcm_lock);
 
                 /*
                  * No point to go further with busy llcds at this point
@@ -483,7 +483,7 @@ struct llog_commit_master *llog_recov_thread_init(char *name)
 
         cfs_atomic_set(&lcm->lcm_count, 0);
         cfs_atomic_set(&lcm->lcm_refcount, 1);
-        cfs_spin_lock_init(&lcm->lcm_lock);
+       spin_lock_init(&lcm->lcm_lock);
         CFS_INIT_LIST_HEAD(&lcm->lcm_llcds);
         rc = llog_recov_thread_start(lcm);
         if (rc) {
@@ -567,10 +567,10 @@ int llog_obd_repl_connect(struct llog_ctxt *ctxt,
         /*
          * Start recovery in separate thread.
          */
-        cfs_mutex_lock(&ctxt->loc_mutex);
+       mutex_lock(&ctxt->loc_mutex);
         ctxt->loc_gen = *gen;
         rc = llog_recov_thread_replay(ctxt, ctxt->llog_proc_cb, logid);
-        cfs_mutex_unlock(&ctxt->loc_mutex);
+       mutex_unlock(&ctxt->loc_mutex);
 
         RETURN(rc);
 }
@@ -592,7 +592,7 @@ int llog_obd_repl_cancel(const struct lu_env *env, struct llog_ctxt *ctxt,
 
         LASSERT(ctxt != NULL);
 
-        cfs_mutex_lock(&ctxt->loc_mutex);
+       mutex_lock(&ctxt->loc_mutex);
         if (!ctxt->loc_lcm) {
                 CDEBUG(D_RPCTRACE, "No lcm for ctxt %p\n", ctxt);
                 GOTO(out, rc = -ENODEV);
@@ -614,7 +614,7 @@ int llog_obd_repl_cancel(const struct lu_env *env, struct llog_ctxt *ctxt,
                 GOTO(out, rc = -ENODEV);
         }
 
-        if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags)) {
+       if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags)) {
                 CDEBUG(D_RPCTRACE, "Commit thread is stopping for ctxt %p\n",
                        ctxt);
                 GOTO(out, rc = -ENODEV);
@@ -634,7 +634,7 @@ int llog_obd_repl_cancel(const struct lu_env *env, struct llog_ctxt *ctxt,
                          * Allocation is successful, let's check for stop
                          * flag again to fall back as soon as possible.
                          */
-                        if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
+                       if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
                                 GOTO(out, rc = -ENODEV);
                 }
 
@@ -653,7 +653,7 @@ int llog_obd_repl_cancel(const struct lu_env *env, struct llog_ctxt *ctxt,
                          * Allocation is successful, let's check for stop
                          * flag again to fall back as soon as possible.
                          */
-                        if (cfs_test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
+                       if (test_bit(LLOG_LCM_FL_EXIT, &lcm->lcm_flags))
                                 GOTO(out, rc = -ENODEV);
                 }
 
@@ -682,7 +682,7 @@ out:
        if (flags & OBD_LLOG_FL_EXIT)
                ctxt->loc_flags = LLOG_CTXT_FLAG_STOP;
 
-        cfs_mutex_unlock(&ctxt->loc_mutex);
+       mutex_unlock(&ctxt->loc_mutex);
         return rc;
 }
 EXPORT_SYMBOL(llog_obd_repl_cancel);
@@ -696,7 +696,7 @@ int llog_obd_repl_sync(struct llog_ctxt *ctxt, struct obd_export *exp,
         /*
          * Flush any remaining llcd.
          */
-        cfs_mutex_lock(&ctxt->loc_mutex);
+       mutex_lock(&ctxt->loc_mutex);
         if (exp && (ctxt->loc_imp == exp->exp_imp_reverse)) {
                 /*
                  * This is ost->mds connection, we can't be sure that mds
@@ -708,7 +708,7 @@ int llog_obd_repl_sync(struct llog_ctxt *ctxt, struct obd_export *exp,
                if (flags & OBD_LLOG_FL_EXIT)
                        ctxt->loc_flags = LLOG_CTXT_FLAG_STOP;
 
-                cfs_mutex_unlock(&ctxt->loc_mutex);
+               mutex_unlock(&ctxt->loc_mutex);
         } else {
                 /*
                  * This is either llog_sync() from generic llog code or sync
@@ -716,7 +716,7 @@ int llog_obd_repl_sync(struct llog_ctxt *ctxt, struct obd_export *exp,
                  * llcds to the target with waiting for completion.
                  */
                 CDEBUG(D_RPCTRACE, "Sync cached llcd\n");
-                cfs_mutex_unlock(&ctxt->loc_mutex);
+               mutex_unlock(&ctxt->loc_mutex);
                rc = llog_cancel(NULL, ctxt, NULL, 0, NULL,
                                 OBD_LLOG_FL_SENDNOW | flags);
         }
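
One non-locking change rides along in this file: llcd_print(llcd, __FUNCTION__, __LINE__) becomes llcd_print(llcd, __func__, __LINE__). __func__ is the C99 predefined identifier, __FUNCTION__ the older GNU spelling; both expand to the name of the enclosing function, so the output is unchanged:

    #include <linux/kernel.h>

    static void where_example(void)
    {
            printk(KERN_DEBUG "%s:%d\n", __func__, __LINE__);  /* "where_example:<line>" */
    }
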
index 106bb95..ec6e6e0 100644 (file)
@@ -89,11 +89,11 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
         /* It might have committed some after we last spoke, so make sure we
          * get rid of them now.
          */
-        cfs_spin_lock(&imp->imp_lock);
-        imp->imp_last_transno_checked = 0;
-        ptlrpc_free_committed(imp);
-        last_transno = imp->imp_last_replay_transno;
-        cfs_spin_unlock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
+       imp->imp_last_transno_checked = 0;
+       ptlrpc_free_committed(imp);
+       last_transno = imp->imp_last_replay_transno;
+       spin_unlock(&imp->imp_lock);
 
         CDEBUG(D_HA, "import %p from %s committed "LPU64" last "LPU64"\n",
                imp, obd2cli_tgt(imp->imp_obd),
@@ -132,9 +132,9 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
                 req = NULL;
         }
 
-        cfs_spin_lock(&imp->imp_lock);
-        imp->imp_resend_replay = 0;
-        cfs_spin_unlock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
+       imp->imp_resend_replay = 0;
+       spin_unlock(&imp->imp_lock);
 
         if (req != NULL) {
                 rc = ptlrpc_replay_req(req);
@@ -164,9 +164,9 @@ int ptlrpc_resend(struct obd_import *imp)
          */
         /* Well... what if lctl recover is called twice at the same time?
          */
-        cfs_spin_lock(&imp->imp_lock);
-        if (imp->imp_state != LUSTRE_IMP_RECOVER) {
-                cfs_spin_unlock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
+       if (imp->imp_state != LUSTRE_IMP_RECOVER) {
+               spin_unlock(&imp->imp_lock);
                 RETURN(-1);
         }
 
@@ -178,9 +178,9 @@ int ptlrpc_resend(struct obd_import *imp)
                 if (!ptlrpc_no_resend(req))
                         ptlrpc_resend_req(req);
         }
-        cfs_spin_unlock(&imp->imp_lock);
+       spin_unlock(&imp->imp_lock);
 
-        RETURN(0);
+       RETURN(0);
 }
 EXPORT_SYMBOL(ptlrpc_resend);
 
@@ -190,17 +190,17 @@ EXPORT_SYMBOL(ptlrpc_resend);
  */
 void ptlrpc_wake_delayed(struct obd_import *imp)
 {
-        cfs_list_t *tmp, *pos;
-        struct ptlrpc_request *req;
+       cfs_list_t *tmp, *pos;
+       struct ptlrpc_request *req;
 
-        cfs_spin_lock(&imp->imp_lock);
-        cfs_list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
-                req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
+       spin_lock(&imp->imp_lock);
+       cfs_list_for_each_safe(tmp, pos, &imp->imp_delayed_list) {
+               req = cfs_list_entry(tmp, struct ptlrpc_request, rq_list);
 
-                DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set);
-                ptlrpc_client_wake_req(req);
-        }
-        cfs_spin_unlock(&imp->imp_lock);
+               DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set);
+               ptlrpc_client_wake_req(req);
+       }
+       spin_unlock(&imp->imp_lock);
 }
 EXPORT_SYMBOL(ptlrpc_wake_delayed);
 
@@ -230,12 +230,12 @@ void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
 
         /* Wait for recovery to complete and resend. If evicted, then
             this request will be errored out later. */
-        cfs_spin_lock(&failed_req->rq_lock);
-        if (!failed_req->rq_no_resend)
-                failed_req->rq_resend = 1;
-        cfs_spin_unlock(&failed_req->rq_lock);
+       spin_lock(&failed_req->rq_lock);
+       if (!failed_req->rq_no_resend)
+               failed_req->rq_resend = 1;
+       spin_unlock(&failed_req->rq_lock);
 
-        EXIT;
+       EXIT;
 }
 
 /**
@@ -261,9 +261,9 @@ int ptlrpc_set_import_active(struct obd_import *imp, int active)
 
                 /* set before invalidate to avoid messages about imp_inval
                  * set without imp_deactive in ptlrpc_import_delay_req */
-                cfs_spin_lock(&imp->imp_lock);
-                imp->imp_deactive = 1;
-                cfs_spin_unlock(&imp->imp_lock);
+               spin_lock(&imp->imp_lock);
+               imp->imp_deactive = 1;
+               spin_unlock(&imp->imp_lock);
 
                 obd_import_event(imp->imp_obd, imp, IMP_EVENT_DEACTIVATE);
 
@@ -275,9 +275,9 @@ int ptlrpc_set_import_active(struct obd_import *imp, int active)
                 CDEBUG(D_HA, "setting import %s VALID\n",
                        obd2cli_tgt(imp->imp_obd));
 
-                cfs_spin_lock(&imp->imp_lock);
-                imp->imp_deactive = 0;
-                cfs_spin_unlock(&imp->imp_lock);
+               spin_lock(&imp->imp_lock);
+               imp->imp_deactive = 0;
+               spin_unlock(&imp->imp_lock);
                 obd_import_event(imp->imp_obd, imp, IMP_EVENT_ACTIVATE);
 
                 rc = ptlrpc_recover_import(imp, NULL, 0);
@@ -290,14 +290,14 @@ EXPORT_SYMBOL(ptlrpc_set_import_active);
 /* Attempt to reconnect an import */
 int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async)
 {
-        int rc = 0;
-        ENTRY;
-
-        cfs_spin_lock(&imp->imp_lock);
-        if (imp->imp_state == LUSTRE_IMP_NEW || imp->imp_deactive ||
-            cfs_atomic_read(&imp->imp_inval_count))
-                rc = -EINVAL;
-        cfs_spin_unlock(&imp->imp_lock);
+       int rc = 0;
+       ENTRY;
+
+       spin_lock(&imp->imp_lock);
+       if (imp->imp_state == LUSTRE_IMP_NEW || imp->imp_deactive ||
+           cfs_atomic_read(&imp->imp_inval_count))
+               rc = -EINVAL;
+       spin_unlock(&imp->imp_lock);
         if (rc)
                 GOTO(out, rc);
 
@@ -315,12 +315,12 @@ int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async)
         }
 
         /* Check if reconnect is already in progress */
-        cfs_spin_lock(&imp->imp_lock);
-        if (imp->imp_state != LUSTRE_IMP_DISCON) {
-                imp->imp_force_verify = 1;
-                rc = -EALREADY;
-        }
-        cfs_spin_unlock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
+       if (imp->imp_state != LUSTRE_IMP_DISCON) {
+               imp->imp_force_verify = 1;
+               rc = -EALREADY;
+       }
+       spin_unlock(&imp->imp_lock);
         if (rc)
                 GOTO(out, rc);
 
@@ -350,12 +350,12 @@ EXPORT_SYMBOL(ptlrpc_recover_import);
 
 int ptlrpc_import_in_recovery(struct obd_import *imp)
 {
-        int in_recovery = 1;
-        cfs_spin_lock(&imp->imp_lock);
-        if (imp->imp_state == LUSTRE_IMP_FULL ||
-            imp->imp_state == LUSTRE_IMP_CLOSED ||
-            imp->imp_state == LUSTRE_IMP_DISCON)
-                in_recovery = 0;
-        cfs_spin_unlock(&imp->imp_lock);
-        return in_recovery;
+       int in_recovery = 1;
+       spin_lock(&imp->imp_lock);
+       if (imp->imp_state == LUSTRE_IMP_FULL ||
+           imp->imp_state == LUSTRE_IMP_CLOSED ||
+           imp->imp_state == LUSTRE_IMP_DISCON)
+               in_recovery = 0;
+       spin_unlock(&imp->imp_lock);
+       return in_recovery;
 }
index c4aa034..51092e6 100644 (file)
@@ -63,7 +63,7 @@
  * policy registers                            *
  ***********************************************/
 
-static cfs_rwlock_t policy_lock;
+static rwlock_t policy_lock;
 static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
         NULL,
 };
@@ -79,13 +79,13 @@ int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
         if (number >= SPTLRPC_POLICY_MAX)
                 return -EINVAL;
 
-        cfs_write_lock(&policy_lock);
+       write_lock(&policy_lock);
         if (unlikely(policies[number])) {
-                cfs_write_unlock(&policy_lock);
+               write_unlock(&policy_lock);
                 return -EALREADY;
         }
         policies[number] = policy;
-        cfs_write_unlock(&policy_lock);
+       write_unlock(&policy_lock);
 
         CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
         return 0;
@@ -98,16 +98,16 @@ int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
 
         LASSERT(number < SPTLRPC_POLICY_MAX);
 
-        cfs_write_lock(&policy_lock);
+       write_lock(&policy_lock);
         if (unlikely(policies[number] == NULL)) {
-                cfs_write_unlock(&policy_lock);
+               write_unlock(&policy_lock);
                 CERROR("%s: already unregistered\n", policy->sp_name);
                 return -EINVAL;
         }
 
         LASSERT(policies[number] == policy);
         policies[number] = NULL;
-        cfs_write_unlock(&policy_lock);
+       write_unlock(&policy_lock);
 
         CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
         return 0;
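
sec.c converts the policy table's lock from cfs_rwlock_t to a plain rwlock_t: registration and unregistration take write_lock() for exclusive access, while the lookup path in the next hunk takes read_lock() so concurrent lookups proceed in parallel. A simplified sketch; the table size and names are illustrative:

    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(policy_lock_example);
    static void *policies_example[8];       /* caller validates 0 <= nr < 8 */

    static int register_example(int nr, void *pol)
    {
            write_lock(&policy_lock_example);  /* excludes readers and writers */
            if (policies_example[nr] != NULL) {
                    write_unlock(&policy_lock_example);
                    return -1;                 /* slot already taken */
            }
            policies_example[nr] = pol;
            write_unlock(&policy_lock_example);
            return 0;
    }
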
@@ -117,7 +117,7 @@ EXPORT_SYMBOL(sptlrpc_unregister_policy);
 static
 struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
 {
-        static CFS_DEFINE_MUTEX(load_mutex);
+       static DEFINE_MUTEX(load_mutex);
         static cfs_atomic_t       loaded = CFS_ATOMIC_INIT(0);
         struct ptlrpc_sec_policy *policy;
         __u16                     number = SPTLRPC_FLVR_POLICY(flavor);
@@ -127,20 +127,20 @@ struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
                 return NULL;
 
         while (1) {
-                cfs_read_lock(&policy_lock);
+               read_lock(&policy_lock);
                 policy = policies[number];
                 if (policy && !cfs_try_module_get(policy->sp_owner))
                         policy = NULL;
                 if (policy == NULL)
                         flag = cfs_atomic_read(&loaded);
-                cfs_read_unlock(&policy_lock);
+               read_unlock(&policy_lock);
 
                 if (policy != NULL || flag != 0 ||
                     number != SPTLRPC_POLICY_GSS)
                         break;
 
                 /* try to load gss module, once */
-                cfs_mutex_lock(&load_mutex);
+               mutex_lock(&load_mutex);
                 if (cfs_atomic_read(&loaded) == 0) {
                         if (cfs_request_module("ptlrpc_gss") == 0)
                                 CDEBUG(D_SEC,
@@ -150,7 +150,7 @@ struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
 
                         cfs_atomic_set(&loaded, 1);
                 }
-                cfs_mutex_unlock(&load_mutex);
+               mutex_unlock(&load_mutex);
         }
 
         return policy;
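
The load_mutex hunk keeps the load-the-gss-module-once logic but moves it onto a standard static DEFINE_MUTEX. The idiom, reduced to its core with a plain flag standing in for the atomic counter used above:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(load_mutex_example);
    static int loaded_example;              /* written only under the mutex */

    static void load_once_example(void)
    {
            mutex_lock(&load_mutex_example);
            if (!loaded_example) {
                    /* one-time work, e.g. loading a module */
                    loaded_example = 1;
            }
            mutex_unlock(&load_mutex_example);
    }
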
@@ -321,15 +321,15 @@ EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
  */
 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
 {
-        struct ptlrpc_request *req, *next;
+       struct ptlrpc_request *req, *next;
 
-        cfs_spin_lock(&ctx->cc_lock);
-        cfs_list_for_each_entry_safe(req, next, &ctx->cc_req_list,
-                                     rq_ctx_chain) {
-                cfs_list_del_init(&req->rq_ctx_chain);
-                ptlrpc_client_wake_req(req);
-        }
-        cfs_spin_unlock(&ctx->cc_lock);
+       spin_lock(&ctx->cc_lock);
+       cfs_list_for_each_entry_safe(req, next, &ctx->cc_req_list,
+                                    rq_ctx_chain) {
+               cfs_list_del_init(&req->rq_ctx_chain);
+               ptlrpc_client_wake_req(req);
+       }
+       spin_unlock(&ctx->cc_lock);
 }
 EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
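
sptlrpc_cli_ctx_wakeup() drains cc_req_list under cc_lock with the _safe list iterator, which is required because each entry is unlinked while the walk is in progress. The same drain pattern with the stock list API, under illustrative names:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct waiter_example {
            struct list_head chain;
    };

    static void drain_example(spinlock_t *lock, struct list_head *head)
    {
            struct waiter_example *w, *next;

            spin_lock(lock);
            list_for_each_entry_safe(w, next, head, chain) {
                    list_del_init(&w->chain);  /* safe: 'next' was saved first */
                    /* wake w's owner here */
            }
            spin_unlock(lock);
    }
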
 
@@ -345,15 +345,15 @@ int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
 
 static int import_sec_check_expire(struct obd_import *imp)
 {
-        int     adapt = 0;
+       int     adapt = 0;
 
-        cfs_spin_lock(&imp->imp_lock);
-        if (imp->imp_sec_expire &&
-            imp->imp_sec_expire < cfs_time_current_sec()) {
-                adapt = 1;
-                imp->imp_sec_expire = 0;
-        }
-        cfs_spin_unlock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
+       if (imp->imp_sec_expire &&
+           imp->imp_sec_expire < cfs_time_current_sec()) {
+               adapt = 1;
+               imp->imp_sec_expire = 0;
+       }
+       spin_unlock(&imp->imp_lock);
 
         if (!adapt)
                 return 0;
@@ -442,9 +442,9 @@ void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
          * in the context waiting list.
          */
         if (!cfs_list_empty(&req->rq_ctx_chain)) {
-                cfs_spin_lock(&req->rq_cli_ctx->cc_lock);
-                cfs_list_del_init(&req->rq_ctx_chain);
-                cfs_spin_unlock(&req->rq_cli_ctx->cc_lock);
+               spin_lock(&req->rq_cli_ctx->cc_lock);
+               cfs_list_del_init(&req->rq_ctx_chain);
+               spin_unlock(&req->rq_cli_ctx->cc_lock);
         }
 
         sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
@@ -545,7 +545,7 @@ int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
         LASSERT(newctx);
 
         if (unlikely(newctx == oldctx && 
-                     cfs_test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
+                    test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
                 /*
                  * still get the old dead ctx, usually means system too busy
                  */
@@ -608,20 +608,20 @@ int ctx_refresh_timeout(void *data)
 static
 void ctx_refresh_interrupt(void *data)
 {
-        struct ptlrpc_request *req = data;
+       struct ptlrpc_request *req = data;
 
-        cfs_spin_lock(&req->rq_lock);
-        req->rq_intr = 1;
-        cfs_spin_unlock(&req->rq_lock);
+       spin_lock(&req->rq_lock);
+       req->rq_intr = 1;
+       spin_unlock(&req->rq_lock);
 }
 
 static
 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
 {
-        cfs_spin_lock(&ctx->cc_lock);
-        if (!cfs_list_empty(&req->rq_ctx_chain))
-                cfs_list_del_init(&req->rq_ctx_chain);
-        cfs_spin_unlock(&ctx->cc_lock);
+       spin_lock(&ctx->cc_lock);
+       if (!cfs_list_empty(&req->rq_ctx_chain))
+               cfs_list_del_init(&req->rq_ctx_chain);
+       spin_unlock(&ctx->cc_lock);
 }
 
 /**
@@ -673,11 +673,11 @@ again:
         if (cli_ctx_is_eternal(ctx))
                 RETURN(0);
 
-        if (unlikely(cfs_test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
+       if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
                 LASSERT(ctx->cc_ops->refresh);
                 ctx->cc_ops->refresh(ctx);
         }
-        LASSERT(cfs_test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
+       LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
 
         LASSERT(ctx->cc_ops->validate);
         if (ctx->cc_ops->validate(ctx) == 0) {
@@ -685,10 +685,10 @@ again:
                 RETURN(0);
         }
 
-        if (unlikely(cfs_test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
-                cfs_spin_lock(&req->rq_lock);
-                req->rq_err = 1;
-                cfs_spin_unlock(&req->rq_lock);
+       if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
+               spin_lock(&req->rq_lock);
+               req->rq_err = 1;
+               spin_unlock(&req->rq_lock);
                 req_off_ctx_list(req, ctx);
                 RETURN(-EPERM);
         }
@@ -720,33 +720,33 @@ again:
         *  2. The current context was never refreshed, so we are fine: we
         *     never actually sent a request with the old context before.
          */
-        if (cfs_test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
-            unlikely(req->rq_reqmsg) &&
-            lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
-                req_off_ctx_list(req, ctx);
-                RETURN(0);
-        }
-
-        if (unlikely(cfs_test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
-                req_off_ctx_list(req, ctx);
-                /*
-                 * don't switch ctx if import was deactivated
-                 */
-                if (req->rq_import->imp_deactive) {
-                        cfs_spin_lock(&req->rq_lock);
-                        req->rq_err = 1;
-                        cfs_spin_unlock(&req->rq_lock);
-                        RETURN(-EINTR);
-                }
-
-                rc = sptlrpc_req_replace_dead_ctx(req);
-                if (rc) {
-                        LASSERT(ctx == req->rq_cli_ctx);
-                        CERROR("req %p: failed to replace dead ctx %p: %d\n",
-                                req, ctx, rc);
-                        cfs_spin_lock(&req->rq_lock);
-                        req->rq_err = 1;
-                        cfs_spin_unlock(&req->rq_lock);
+       if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
+           unlikely(req->rq_reqmsg) &&
+           lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
+               req_off_ctx_list(req, ctx);
+               RETURN(0);
+       }
+
+       if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
+               req_off_ctx_list(req, ctx);
+               /*
+                * don't switch ctx if import was deactivated
+                */
+               if (req->rq_import->imp_deactive) {
+                       spin_lock(&req->rq_lock);
+                       req->rq_err = 1;
+                       spin_unlock(&req->rq_lock);
+                       RETURN(-EINTR);
+               }
+
+               rc = sptlrpc_req_replace_dead_ctx(req);
+               if (rc) {
+                       LASSERT(ctx == req->rq_cli_ctx);
+                       CERROR("req %p: failed to replace dead ctx %p: %d\n",
+                              req, ctx, rc);
+                       spin_lock(&req->rq_lock);
+                       req->rq_err = 1;
+                       spin_unlock(&req->rq_lock);
                         RETURN(rc);
                 }
 
@@ -758,22 +758,22 @@ again:
          * Now we're sure this context is during upcall, add myself into
          * waiting list
          */
-        cfs_spin_lock(&ctx->cc_lock);
-        if (cfs_list_empty(&req->rq_ctx_chain))
-                cfs_list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
-        cfs_spin_unlock(&ctx->cc_lock);
-
-        if (timeout < 0)
-                RETURN(-EWOULDBLOCK);
-
-        /* Clear any flags that may be present from previous sends */
-        LASSERT(req->rq_receiving_reply == 0);
-        cfs_spin_lock(&req->rq_lock);
-        req->rq_err = 0;
-        req->rq_timedout = 0;
-        req->rq_resend = 0;
-        req->rq_restart = 0;
-        cfs_spin_unlock(&req->rq_lock);
+       spin_lock(&ctx->cc_lock);
+       if (cfs_list_empty(&req->rq_ctx_chain))
+               cfs_list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
+       spin_unlock(&ctx->cc_lock);
+
+       if (timeout < 0)
+               RETURN(-EWOULDBLOCK);
+
+       /* Clear any flags that may be present from previous sends */
+       LASSERT(req->rq_receiving_reply == 0);
+       spin_lock(&req->rq_lock);
+       req->rq_err = 0;
+       req->rq_timedout = 0;
+       req->rq_resend = 0;
+       req->rq_restart = 0;
+       spin_unlock(&req->rq_lock);
 
         lwi = LWI_TIMEOUT_INTR(timeout * CFS_HZ, ctx_refresh_timeout,
                                ctx_refresh_interrupt, req);
@@ -847,9 +847,9 @@ void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
 
         sec = req->rq_cli_ctx->cc_sec;
 
-        cfs_spin_lock(&sec->ps_lock);
-        req->rq_flvr = sec->ps_flvr;
-        cfs_spin_unlock(&sec->ps_lock);
+       spin_lock(&sec->ps_lock);
+       req->rq_flvr = sec->ps_flvr;
+       spin_unlock(&sec->ps_lock);
 
         /* force SVC_NULL for context initiation rpc, SVC_INTG for context
          * destruction rpc */
@@ -920,7 +920,7 @@ int sptlrpc_import_check_ctx(struct obd_import *imp)
         if (!req)
                 RETURN(-ENOMEM);
 
-        cfs_spin_lock_init(&req->rq_lock);
+       spin_lock_init(&req->rq_lock);
         cfs_atomic_set(&req->rq_refcount, 10000);
         CFS_INIT_LIST_HEAD(&req->rq_ctx_chain);
         cfs_waitq_init(&req->rq_reply_waitq);
@@ -1114,42 +1114,42 @@ int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
                 GOTO(err_req, rc = -ENOMEM);
 
        /* sanity checks and copy data out, do it inside the spinlock */
-        cfs_spin_lock(&req->rq_lock);
-
-        if (req->rq_replied) {
-                cfs_spin_unlock(&req->rq_lock);
-                GOTO(err_buf, rc = -EALREADY);
-        }
-
-        LASSERT(req->rq_repbuf);
-        LASSERT(req->rq_repdata == NULL);
-        LASSERT(req->rq_repmsg == NULL);
-
-        if (req->rq_reply_off != 0) {
-                CERROR("early reply with offset %u\n", req->rq_reply_off);
-                cfs_spin_unlock(&req->rq_lock);
-                GOTO(err_buf, rc = -EPROTO);
-        }
-
-        if (req->rq_nob_received != early_size) {
-                /* even another early arrived the size should be the same */
-                CERROR("data size has changed from %u to %u\n",
-                       early_size, req->rq_nob_received);
-                cfs_spin_unlock(&req->rq_lock);
-                GOTO(err_buf, rc = -EINVAL);
-        }
-
-        if (req->rq_nob_received < sizeof(struct lustre_msg)) {
-                CERROR("early reply length %d too small\n",
-                       req->rq_nob_received);
-                cfs_spin_unlock(&req->rq_lock);
-                GOTO(err_buf, rc = -EALREADY);
-        }
-
-        memcpy(early_buf, req->rq_repbuf, early_size);
-        cfs_spin_unlock(&req->rq_lock);
-
-        cfs_spin_lock_init(&early_req->rq_lock);
+       spin_lock(&req->rq_lock);
+
+       if (req->rq_replied) {
+               spin_unlock(&req->rq_lock);
+               GOTO(err_buf, rc = -EALREADY);
+       }
+
+       LASSERT(req->rq_repbuf);
+       LASSERT(req->rq_repdata == NULL);
+       LASSERT(req->rq_repmsg == NULL);
+
+       if (req->rq_reply_off != 0) {
+               CERROR("early reply with offset %u\n", req->rq_reply_off);
+               spin_unlock(&req->rq_lock);
+               GOTO(err_buf, rc = -EPROTO);
+       }
+
+       if (req->rq_nob_received != early_size) {
+               /* even if another early reply arrived, the size should be the same */
+               CERROR("data size has changed from %u to %u\n",
+                      early_size, req->rq_nob_received);
+               spin_unlock(&req->rq_lock);
+               GOTO(err_buf, rc = -EINVAL);
+       }
+
+       if (req->rq_nob_received < sizeof(struct lustre_msg)) {
+               CERROR("early reply length %d too small\n",
+                      req->rq_nob_received);
+               spin_unlock(&req->rq_lock);
+               GOTO(err_buf, rc = -EALREADY);
+       }
+
+       memcpy(early_buf, req->rq_repbuf, early_size);
+       spin_unlock(&req->rq_lock);
+
+       spin_lock_init(&early_req->rq_lock);
         early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
         early_req->rq_flvr = req->rq_flvr;
         early_req->rq_repbuf = early_buf;
@@ -1334,27 +1334,27 @@ struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
 
 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
 {
-        struct ptlrpc_sec *sec;
+       struct ptlrpc_sec *sec;
 
-        cfs_spin_lock(&imp->imp_lock);
-        sec = sptlrpc_sec_get(imp->imp_sec);
-        cfs_spin_unlock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
+       sec = sptlrpc_sec_get(imp->imp_sec);
+       spin_unlock(&imp->imp_lock);
 
-        return sec;
+       return sec;
 }
 EXPORT_SYMBOL(sptlrpc_import_sec_ref);
 
 static void sptlrpc_import_sec_install(struct obd_import *imp,
                                        struct ptlrpc_sec *sec)
 {
-        struct ptlrpc_sec *old_sec;
+       struct ptlrpc_sec *old_sec;
 
-        LASSERT_ATOMIC_POS(&sec->ps_refcount);
+       LASSERT_ATOMIC_POS(&sec->ps_refcount);
 
-        cfs_spin_lock(&imp->imp_lock);
-        old_sec = imp->imp_sec;
-        imp->imp_sec = sec;
-        cfs_spin_unlock(&imp->imp_lock);
+       spin_lock(&imp->imp_lock);
+       old_sec = imp->imp_sec;
+       imp->imp_sec = sec;
+       spin_unlock(&imp->imp_lock);
 
         if (old_sec) {
                 sptlrpc_sec_kill(old_sec);
@@ -1389,9 +1389,9 @@ static void sptlrpc_import_sec_adapt_inplace(struct obd_import *imp,
                        sptlrpc_secflags2str(sf->sf_flags,
                                             str2, sizeof(str2)));
 
-        cfs_spin_lock(&sec->ps_lock);
-        flavor_copy(&sec->ps_flvr, sf);
-        cfs_spin_unlock(&sec->ps_lock);
+       spin_lock(&sec->ps_lock);
+       flavor_copy(&sec->ps_flvr, sf);
+       spin_unlock(&sec->ps_lock);
 }
 
 /**
@@ -1475,7 +1475,7 @@ int sptlrpc_import_sec_adapt(struct obd_import *imp,
                        sptlrpc_flavor2name(&sf, str, sizeof(str)));
         }
 
-        cfs_mutex_lock(&imp->imp_sec_mutex);
+       mutex_lock(&imp->imp_sec_mutex);
 
         newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
         if (newsec) {
@@ -1487,7 +1487,7 @@ int sptlrpc_import_sec_adapt(struct obd_import *imp,
                 rc = -EPERM;
         }
 
-        cfs_mutex_unlock(&imp->imp_sec_mutex);
+       mutex_unlock(&imp->imp_sec_mutex);
 out:
         sptlrpc_sec_put(sec);
         RETURN(rc);
@@ -1776,7 +1776,7 @@ int sptlrpc_target_export_check(struct obd_export *exp,
         if (req->rq_ctx_fini)
                 return 0;
 
-        cfs_spin_lock(&exp->exp_lock);
+       spin_lock(&exp->exp_lock);
 
         /* if flavor just changed (exp->exp_flvr_changed != 0), we wait for
          * the first req with the new flavor, then treat it as current flavor,
@@ -1806,16 +1806,16 @@ int sptlrpc_target_export_check(struct obd_export *exp,
                     !(req->rq_ctx_init &&
                       (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
                        req->rq_auth_usr_ost))) {
-                        cfs_spin_unlock(&exp->exp_lock);
-                        CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
-                               req->rq_auth_gss, req->rq_ctx_init,
-                               req->rq_auth_usr_root, req->rq_auth_usr_mdt,
-                               req->rq_auth_usr_ost);
-                        return 0;
-                }
+                       spin_unlock(&exp->exp_lock);
+                       CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
+                              req->rq_auth_gss, req->rq_ctx_init,
+                              req->rq_auth_usr_root, req->rq_auth_usr_mdt,
+                              req->rq_auth_usr_ost);
+                       return 0;
+               }
 
-                exp->exp_flvr_adapt = 0;
-                cfs_spin_unlock(&exp->exp_lock);
+               exp->exp_flvr_adapt = 0;
+               spin_unlock(&exp->exp_lock);
 
                 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
                                                 req->rq_svc_ctx, &flavor);
@@ -1829,37 +1829,37 @@ int sptlrpc_target_export_check(struct obd_export *exp,
                 if (!req->rq_auth_gss || !req->rq_ctx_init ||
                     (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
                      !req->rq_auth_usr_ost)) {
-                        cfs_spin_unlock(&exp->exp_lock);
-                        return 0;
-                }
-
-                /* if flavor just changed, we should not proceed, just leave
-                 * it and current flavor will be discovered and replaced
-                 * shortly, and let _this_ rpc pass through */
-                if (exp->exp_flvr_changed) {
-                        LASSERT(exp->exp_flvr_adapt);
-                        cfs_spin_unlock(&exp->exp_lock);
-                        return 0;
-                }
-
-                if (exp->exp_flvr_adapt) {
-                        exp->exp_flvr_adapt = 0;
-                        CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
-                               exp, exp->exp_flvr.sf_rpc,
-                               exp->exp_flvr_old[0].sf_rpc,
-                               exp->exp_flvr_old[1].sf_rpc);
-                        flavor = exp->exp_flvr;
-                        cfs_spin_unlock(&exp->exp_lock);
-
-                        return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
-                                                        req->rq_svc_ctx,
-                                                        &flavor);
-                } else {
-                        CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
-                               "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
-                               exp->exp_flvr_old[0].sf_rpc,
-                               exp->exp_flvr_old[1].sf_rpc);
-                        cfs_spin_unlock(&exp->exp_lock);
+                       spin_unlock(&exp->exp_lock);
+                       return 0;
+               }
+
+               /* if the flavor just changed, do not proceed; the current
+                * flavor will be discovered and replaced shortly, so just
+                * let _this_ rpc pass through */
+               if (exp->exp_flvr_changed) {
+                       LASSERT(exp->exp_flvr_adapt);
+                       spin_unlock(&exp->exp_lock);
+                       return 0;
+               }
+
+               if (exp->exp_flvr_adapt) {
+                       exp->exp_flvr_adapt = 0;
+                       CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
+                              exp, exp->exp_flvr.sf_rpc,
+                              exp->exp_flvr_old[0].sf_rpc,
+                              exp->exp_flvr_old[1].sf_rpc);
+                       flavor = exp->exp_flvr;
+                       spin_unlock(&exp->exp_lock);
+
+                       return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
+                                                       req->rq_svc_ctx,
+                                                       &flavor);
+               } else {
+                       CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
+                              "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
+                              exp->exp_flvr_old[0].sf_rpc,
+                              exp->exp_flvr_old[1].sf_rpc);
+                       spin_unlock(&exp->exp_lock);
 
                         return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
                                                            req->rq_svc_ctx);
@@ -1876,7 +1876,7 @@ int sptlrpc_target_export_check(struct obd_export *exp,
                                        exp->exp_flvr_old[1].sf_rpc,
                                        exp->exp_flvr_expire[0] -
                                                 cfs_time_current_sec());
-                                cfs_spin_unlock(&exp->exp_lock);
+                               spin_unlock(&exp->exp_lock);
                                 return 0;
                         }
                 } else {
@@ -1901,7 +1901,7 @@ int sptlrpc_target_export_check(struct obd_export *exp,
                                        exp->exp_flvr_old[1].sf_rpc,
                                        exp->exp_flvr_expire[1] -
                                                 cfs_time_current_sec());
-                                cfs_spin_unlock(&exp->exp_lock);
+                               spin_unlock(&exp->exp_lock);
                                 return 0;
                         }
                 } else {
@@ -1918,7 +1918,7 @@ int sptlrpc_target_export_check(struct obd_export *exp,
                        exp->exp_flvr_old[1].sf_rpc);
         }
 
-        cfs_spin_unlock(&exp->exp_lock);
+       spin_unlock(&exp->exp_lock);
 
         CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with "
               "unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n",
@@ -1947,16 +1947,16 @@ void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
 
         LASSERT(obd);
 
-        cfs_spin_lock(&obd->obd_dev_lock);
+       spin_lock(&obd->obd_dev_lock);
 
-        cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
-                if (exp->exp_connection == NULL)
-                        continue;
+       cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
+               if (exp->exp_connection == NULL)
+                       continue;
 
-                /* note if this export had just been updated flavor
-                 * (exp_flvr_changed == 1), this will override the
-                 * previous one. */
-                cfs_spin_lock(&exp->exp_lock);
+               /* note: if this export's flavor was just updated
+                * (exp_flvr_changed == 1), this will override the
+                * previous one. */
+               spin_lock(&exp->exp_lock);
                 sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
                                              exp->exp_connection->c_peer.nid,
                                              &new_flvr);
@@ -1972,10 +1972,10 @@ void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
                                exp->exp_flvr.sf_rpc,
                                exp->exp_flvr_old[1].sf_rpc);
                 }
-                cfs_spin_unlock(&exp->exp_lock);
-        }
+               spin_unlock(&exp->exp_lock);
+       }
 
-        cfs_spin_unlock(&obd->obd_dev_lock);
+       spin_unlock(&obd->obd_dev_lock);
 }
 EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
 
@@ -2506,7 +2506,7 @@ int sptlrpc_init(void)
 {
         int rc;
 
-        cfs_rwlock_init(&policy_lock);
+       rwlock_init(&policy_lock);
 
         rc = sptlrpc_gc_init();
         if (rc)
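
sptlrpc_init() above now calls the stock kernel rwlock_init() on policy_lock. For readers new to the API, a sketch of the read-mostly registry pattern such a lock serves; the demo_* names are illustrative, not the real policy table:

    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(demo_policy_lock);
    static void *demo_policies[8];

    /* Lookup: many readers may hold the lock concurrently. */
    static void *demo_policy_get(unsigned int idx)
    {
            void *p;

            read_lock(&demo_policy_lock);
            p = demo_policies[idx];
            read_unlock(&demo_policy_lock);
            return p;
    }

    /* Registration: writers are exclusive. */
    static void demo_policy_set(unsigned int idx, void *p)
    {
            write_lock(&demo_policy_lock);
            demo_policies[idx] = p;
            write_unlock(&demo_policy_lock);
    }
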
index a9eaccb..e93b901 100644 (file)
@@ -104,7 +104,7 @@ static struct ptlrpc_enc_page_pool {
         /*
          * in-pool pages bookkeeping
          */
-        cfs_spinlock_t   epp_lock;        /* protect following fields */
+       spinlock_t       epp_lock;         /* protect following fields */
         unsigned long    epp_total_pages; /* total pages in pools */
         unsigned long    epp_free_pages;  /* current pages available */
 
@@ -141,7 +141,7 @@ int sptlrpc_proc_read_enc_pool(char *page, char **start, off_t off, int count,
 {
         int     rc;
 
-        cfs_spin_lock(&page_pools.epp_lock);
+       spin_lock(&page_pools.epp_lock);
 
         rc = snprintf(page, count,
                       "physical pages:          %lu\n"
@@ -183,8 +183,8 @@ int sptlrpc_proc_read_enc_pool(char *page, char **start, off_t off, int count,
                       page_pools.epp_st_max_wait, CFS_HZ
                      );
 
-        cfs_spin_unlock(&page_pools.epp_lock);
-        return rc;
+       spin_unlock(&page_pools.epp_lock);
+       return rc;
 }
 
 static void enc_pools_release_free_pages(long npages)
@@ -238,8 +238,8 @@ static void enc_pools_release_free_pages(long npages)
  */
 static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
 {
-        if (unlikely(shrink_param(sc, nr_to_scan) != 0)) {
-                cfs_spin_lock(&page_pools.epp_lock);
+       if (unlikely(shrink_param(sc, nr_to_scan) != 0)) {
+               spin_lock(&page_pools.epp_lock);
                 shrink_param(sc, nr_to_scan) = min_t(unsigned long,
                                                    shrink_param(sc, nr_to_scan),
                                                    page_pools.epp_free_pages -
@@ -254,23 +254,23 @@ static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
                         page_pools.epp_st_shrinks++;
                         page_pools.epp_last_shrink = cfs_time_current_sec();
                 }
-                cfs_spin_unlock(&page_pools.epp_lock);
-        }
+               spin_unlock(&page_pools.epp_lock);
+       }
 
-        /*
-         * if no pool access for a long time, we consider it's fully idle.
-         * a little race here is fine.
-         */
-        if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
-                     CACHE_QUIESCENT_PERIOD)) {
-                cfs_spin_lock(&page_pools.epp_lock);
-                page_pools.epp_idle_idx = IDLE_IDX_MAX;
-                cfs_spin_unlock(&page_pools.epp_lock);
-        }
+       /*
+        * if the pool has not been accessed for a long time, consider it
+        * fully idle.  a little race here is fine.
+        */
+       if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
+                    CACHE_QUIESCENT_PERIOD)) {
+               spin_lock(&page_pools.epp_lock);
+               page_pools.epp_idle_idx = IDLE_IDX_MAX;
+               spin_unlock(&page_pools.epp_lock);
+       }
 
-        LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
-        return max((int) page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
-               (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
+       LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
+       return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
+               (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
 }
 
 static inline
@@ -321,7 +321,7 @@ static void enc_pools_insert(cfs_page_t ***pools, int npools, int npages)
         LASSERT(npages_to_npools(npages) == npools);
         LASSERT(page_pools.epp_growing);
 
-        cfs_spin_lock(&page_pools.epp_lock);
+       spin_lock(&page_pools.epp_lock);
 
         /*
          * (1) fill all the free slots of current pools.
@@ -388,20 +388,20 @@ static void enc_pools_insert(cfs_page_t ***pools, int npools, int npages)
         CDEBUG(D_SEC, "add %d pages to total %lu\n", npages,
                page_pools.epp_total_pages);
 
-        cfs_spin_unlock(&page_pools.epp_lock);
+       spin_unlock(&page_pools.epp_lock);
 }
 
 static int enc_pools_add_pages(int npages)
 {
-        static CFS_DEFINE_MUTEX(add_pages_mutex);
-        cfs_page_t   ***pools;
-        int             npools, alloced = 0;
-        int             i, j, rc = -ENOMEM;
+       static DEFINE_MUTEX(add_pages_mutex);
+       cfs_page_t   ***pools;
+       int             npools, alloced = 0;
+       int             i, j, rc = -ENOMEM;
 
-        if (npages < PTLRPC_MAX_BRW_PAGES)
-                npages = PTLRPC_MAX_BRW_PAGES;
+       if (npages < PTLRPC_MAX_BRW_PAGES)
+               npages = PTLRPC_MAX_BRW_PAGES;
 
-        cfs_mutex_lock(&add_pages_mutex);
+       mutex_lock(&add_pages_mutex);
 
         if (npages + page_pools.epp_total_pages > page_pools.epp_max_pages)
                 npages = page_pools.epp_max_pages - page_pools.epp_total_pages;
@@ -443,7 +443,7 @@ out:
                 CERROR("Failed to allocate %d enc pages\n", npages);
         }
 
-        cfs_mutex_unlock(&add_pages_mutex);
+       mutex_unlock(&add_pages_mutex);
         return rc;
 }
 
@@ -514,7 +514,7 @@ int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
         if (desc->bd_enc_iov == NULL)
                 return -ENOMEM;
 
-        cfs_spin_lock(&page_pools.epp_lock);
+       spin_lock(&page_pools.epp_lock);
 
         page_pools.epp_st_access++;
 again:
@@ -530,9 +530,9 @@ again:
                 if (enc_pools_should_grow(desc->bd_iov_count, now)) {
                         page_pools.epp_growing = 1;
 
-                        cfs_spin_unlock(&page_pools.epp_lock);
-                        enc_pools_add_pages(page_pools.epp_pages_short / 2);
-                        cfs_spin_lock(&page_pools.epp_lock);
+                       spin_unlock(&page_pools.epp_lock);
+                       enc_pools_add_pages(page_pools.epp_pages_short / 2);
+                       spin_lock(&page_pools.epp_lock);
 
                         page_pools.epp_growing = 0;
 
@@ -547,11 +547,11 @@ again:
                         cfs_waitlink_init(&waitlink);
                         cfs_waitq_add(&page_pools.epp_waitq, &waitlink);
 
-                        cfs_spin_unlock(&page_pools.epp_lock);
-                        cfs_waitq_wait(&waitlink, CFS_TASK_UNINT);
-                        cfs_waitq_del(&page_pools.epp_waitq, &waitlink);
-                        LASSERT(page_pools.epp_waitqlen > 0);
-                        cfs_spin_lock(&page_pools.epp_lock);
+                       spin_unlock(&page_pools.epp_lock);
+                       cfs_waitq_wait(&waitlink, CFS_TASK_UNINT);
+                       cfs_waitq_del(&page_pools.epp_waitq, &waitlink);
+                       LASSERT(page_pools.epp_waitqlen > 0);
+                       spin_lock(&page_pools.epp_lock);
                         page_pools.epp_waitqlen--;
                 }
 
@@ -603,8 +603,8 @@ again:
 
         page_pools.epp_last_access = cfs_time_current_sec();
 
-        cfs_spin_unlock(&page_pools.epp_lock);
-        return 0;
+       spin_unlock(&page_pools.epp_lock);
+       return 0;
 }
 EXPORT_SYMBOL(sptlrpc_enc_pool_get_pages);
 
@@ -618,7 +618,7 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
 
         LASSERT(desc->bd_iov_count > 0);
 
-        cfs_spin_lock(&page_pools.epp_lock);
+       spin_lock(&page_pools.epp_lock);
 
         p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
         g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
@@ -645,11 +645,11 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
 
         enc_pools_wakeup();
 
-        cfs_spin_unlock(&page_pools.epp_lock);
+       spin_unlock(&page_pools.epp_lock);
 
-        OBD_FREE(desc->bd_enc_iov,
-                 desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
-        desc->bd_enc_iov = NULL;
+       OBD_FREE(desc->bd_enc_iov,
+                desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
+       desc->bd_enc_iov = NULL;
 }
 EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);
 
@@ -660,25 +660,25 @@ EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);
  */
 int sptlrpc_enc_pool_add_user(void)
 {
-        int     need_grow = 0;
+       int     need_grow = 0;
 
-        cfs_spin_lock(&page_pools.epp_lock);
-        if (page_pools.epp_growing == 0 && page_pools.epp_total_pages == 0) {
-                page_pools.epp_growing = 1;
-                need_grow = 1;
-        }
-        cfs_spin_unlock(&page_pools.epp_lock);
+       spin_lock(&page_pools.epp_lock);
+       if (page_pools.epp_growing == 0 && page_pools.epp_total_pages == 0) {
+               page_pools.epp_growing = 1;
+               need_grow = 1;
+       }
+       spin_unlock(&page_pools.epp_lock);
 
-        if (need_grow) {
-                enc_pools_add_pages(PTLRPC_MAX_BRW_PAGES +
-                                    PTLRPC_MAX_BRW_PAGES);
+       if (need_grow) {
+               enc_pools_add_pages(PTLRPC_MAX_BRW_PAGES +
+                                   PTLRPC_MAX_BRW_PAGES);
 
-                cfs_spin_lock(&page_pools.epp_lock);
-                page_pools.epp_growing = 0;
-                enc_pools_wakeup();
-                cfs_spin_unlock(&page_pools.epp_lock);
-        }
-        return 0;
+               spin_lock(&page_pools.epp_lock);
+               page_pools.epp_growing = 0;
+               enc_pools_wakeup();
+               spin_unlock(&page_pools.epp_lock);
+       }
+       return 0;
 }
 EXPORT_SYMBOL(sptlrpc_enc_pool_add_user);
 
@@ -725,7 +725,7 @@ int sptlrpc_enc_pool_init(void)
         page_pools.epp_last_shrink = cfs_time_current_sec();
         page_pools.epp_last_access = cfs_time_current_sec();
 
-        cfs_spin_lock_init(&page_pools.epp_lock);
+       spin_lock_init(&page_pools.epp_lock);
         page_pools.epp_total_pages = 0;
         page_pools.epp_free_pages = 0;
 
index 3f4698d..ffae130 100644 (file)
@@ -515,7 +515,7 @@ struct sptlrpc_conf {
         cfs_list_t              sc_tgts;      /* target-specific rules */
 };
 
-static cfs_mutex_t sptlrpc_conf_lock;
+static struct mutex sptlrpc_conf_lock;
 static CFS_LIST_HEAD(sptlrpc_confs);
 
 static inline int is_hex(char c)
@@ -696,7 +696,7 @@ static int __sptlrpc_process_config(struct lustre_cfg *lcfg,
         if (conf == NULL) {
                 target2fsname(target, fsname, sizeof(fsname));
 
-                cfs_mutex_lock(&sptlrpc_conf_lock);
+               mutex_lock(&sptlrpc_conf_lock);
                 conf = sptlrpc_conf_get(fsname, 0);
                 if (conf == NULL) {
                         CERROR("can't find conf\n");
@@ -704,9 +704,9 @@ static int __sptlrpc_process_config(struct lustre_cfg *lcfg,
                 } else {
                         rc = sptlrpc_conf_merge_rule(conf, target, &rule);
                 }
-                cfs_mutex_unlock(&sptlrpc_conf_lock);
+               mutex_unlock(&sptlrpc_conf_lock);
         } else {
-                LASSERT(cfs_mutex_is_locked(&sptlrpc_conf_lock));
+               LASSERT(mutex_is_locked(&sptlrpc_conf_lock));
                 rc = sptlrpc_conf_merge_rule(conf, target, &rule);
         }
 
@@ -748,7 +748,7 @@ void sptlrpc_conf_log_update_begin(const char *logname)
         if (logname2fsname(logname, fsname, sizeof(fsname)))
                 return;
 
-        cfs_mutex_lock(&sptlrpc_conf_lock);
+       mutex_lock(&sptlrpc_conf_lock);
 
         conf = sptlrpc_conf_get(fsname, 0);
         if (conf && conf->sc_local) {
@@ -757,7 +757,7 @@ void sptlrpc_conf_log_update_begin(const char *logname)
         }
         conf->sc_modified = 0;
 
-        cfs_mutex_unlock(&sptlrpc_conf_lock);
+       mutex_unlock(&sptlrpc_conf_lock);
 }
 EXPORT_SYMBOL(sptlrpc_conf_log_update_begin);
 
@@ -772,7 +772,7 @@ void sptlrpc_conf_log_update_end(const char *logname)
         if (logname2fsname(logname, fsname, sizeof(fsname)))
                 return;
 
-        cfs_mutex_lock(&sptlrpc_conf_lock);
+       mutex_lock(&sptlrpc_conf_lock);
 
         conf = sptlrpc_conf_get(fsname, 0);
         if (conf) {
@@ -786,7 +786,7 @@ void sptlrpc_conf_log_update_end(const char *logname)
                 conf->sc_updated = 1;
         }
 
-        cfs_mutex_unlock(&sptlrpc_conf_lock);
+       mutex_unlock(&sptlrpc_conf_lock);
 }
 EXPORT_SYMBOL(sptlrpc_conf_log_update_end);
 
@@ -797,9 +797,9 @@ void sptlrpc_conf_log_start(const char *logname)
         if (logname2fsname(logname, fsname, sizeof(fsname)))
                 return;
 
-        cfs_mutex_lock(&sptlrpc_conf_lock);
+       mutex_lock(&sptlrpc_conf_lock);
         sptlrpc_conf_get(fsname, 1);
-        cfs_mutex_unlock(&sptlrpc_conf_lock);
+       mutex_unlock(&sptlrpc_conf_lock);
 }
 EXPORT_SYMBOL(sptlrpc_conf_log_start);
 
@@ -811,11 +811,11 @@ void sptlrpc_conf_log_stop(const char *logname)
         if (logname2fsname(logname, fsname, sizeof(fsname)))
                 return;
 
-        cfs_mutex_lock(&sptlrpc_conf_lock);
+       mutex_lock(&sptlrpc_conf_lock);
         conf = sptlrpc_conf_get(fsname, 0);
         if (conf)
                 sptlrpc_conf_free(conf);
-        cfs_mutex_unlock(&sptlrpc_conf_lock);
+       mutex_unlock(&sptlrpc_conf_lock);
 }
 EXPORT_SYMBOL(sptlrpc_conf_log_stop);
 
@@ -857,7 +857,7 @@ void sptlrpc_conf_choose_flavor(enum lustre_sec_part from,
 
         target2fsname(target->uuid, name, sizeof(name));
 
-        cfs_mutex_lock(&sptlrpc_conf_lock);
+       mutex_lock(&sptlrpc_conf_lock);
 
         conf = sptlrpc_conf_get(name, 0);
         if (conf == NULL)
@@ -879,7 +879,7 @@ void sptlrpc_conf_choose_flavor(enum lustre_sec_part from,
 
         rc = sptlrpc_rule_set_choose(&conf->sc_rset, from, to, nid, sf);
 out:
-        cfs_mutex_unlock(&sptlrpc_conf_lock);
+       mutex_unlock(&sptlrpc_conf_lock);
 
         if (rc == 0)
                 get_default_flavor(sf);
@@ -917,19 +917,19 @@ void sptlrpc_conf_client_adapt(struct obd_device *obd)
         CDEBUG(D_SEC, "obd %s\n", obd->u.cli.cl_target_uuid.uuid);
 
         /* serialize with connect/disconnect import */
-        cfs_down_read(&obd->u.cli.cl_sem);
-
-        imp = obd->u.cli.cl_import;
-        if (imp) {
-                cfs_spin_lock(&imp->imp_lock);
-                if (imp->imp_sec)
-                        imp->imp_sec_expire = cfs_time_current_sec() +
-                                              SEC_ADAPT_DELAY;
-                cfs_spin_unlock(&imp->imp_lock);
-        }
+       down_read(&obd->u.cli.cl_sem);
+
+       imp = obd->u.cli.cl_import;
+       if (imp) {
+               spin_lock(&imp->imp_lock);
+               if (imp->imp_sec)
+                       imp->imp_sec_expire = cfs_time_current_sec() +
+                               SEC_ADAPT_DELAY;
+               spin_unlock(&imp->imp_lock);
+       }
 
-        cfs_up_read(&obd->u.cli.cl_sem);
-        EXIT;
+       up_read(&obd->u.cli.cl_sem);
+       EXIT;
 }
 EXPORT_SYMBOL(sptlrpc_conf_client_adapt);
 
@@ -1181,7 +1181,7 @@ int sptlrpc_conf_target_get_rules(struct obd_device *obd,
 
         target2fsname(obd->obd_uuid.uuid, fsname, sizeof(fsname));
 
-        cfs_mutex_lock(&sptlrpc_conf_lock);
+       mutex_lock(&sptlrpc_conf_lock);
 
         conf = sptlrpc_conf_get(fsname, 0);
         if (conf == NULL) {
@@ -1218,14 +1218,14 @@ int sptlrpc_conf_target_get_rules(struct obd_device *obd,
                                       conf_tgt ? &conf_tgt->sct_rset: NULL,
                                       LUSTRE_SP_ANY, sp_dst, rset);
 out:
-        cfs_mutex_unlock(&sptlrpc_conf_lock);
+       mutex_unlock(&sptlrpc_conf_lock);
         RETURN(rc);
 }
 EXPORT_SYMBOL(sptlrpc_conf_target_get_rules);
 
 int  sptlrpc_conf_init(void)
 {
-        cfs_mutex_init(&sptlrpc_conf_lock);
+       mutex_init(&sptlrpc_conf_lock);
         return 0;
 }
 
@@ -1233,10 +1233,10 @@ void sptlrpc_conf_fini(void)
 {
         struct sptlrpc_conf  *conf, *conf_next;
 
-        cfs_mutex_lock(&sptlrpc_conf_lock);
+       mutex_lock(&sptlrpc_conf_lock);
         cfs_list_for_each_entry_safe(conf, conf_next, &sptlrpc_confs, sc_list) {
                 sptlrpc_conf_free(conf);
         }
         LASSERT(cfs_list_empty(&sptlrpc_confs));
-        cfs_mutex_unlock(&sptlrpc_conf_lock);
+       mutex_unlock(&sptlrpc_conf_lock);
 }
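
sec_config.c declares sptlrpc_conf_lock as a bare struct mutex and initializes it from sptlrpc_conf_init(); the kernel also offers DEFINE_MUTEX() to fold declaration and initialization into one step. A sketch of both forms (the demo_* names are illustrative):

    #include <linux/init.h>
    #include <linux/mutex.h>

    static struct mutex demo_conf_lock;         /* initialized at runtime */
    static DEFINE_MUTEX(demo_other_lock);       /* initialized statically */

    static int __init demo_module_init(void)
    {
            mutex_init(&demo_conf_lock);        /* must run before first use */
            return 0;
    }
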
index a1649d2..7af5858 100644 (file)
 
 #ifdef __KERNEL__
 
-static cfs_mutex_t sec_gc_mutex;
+static struct mutex sec_gc_mutex;
 static CFS_LIST_HEAD(sec_gc_list);
-static cfs_spinlock_t sec_gc_list_lock;
+static spinlock_t sec_gc_list_lock;
 
 static CFS_LIST_HEAD(sec_gc_ctx_list);
-static cfs_spinlock_t sec_gc_ctx_list_lock;
+static spinlock_t sec_gc_ctx_list_lock;
 
 static struct ptlrpc_thread sec_gc_thread;
 static cfs_atomic_t sec_gc_wait_del = CFS_ATOMIC_INIT(0);
@@ -74,11 +74,11 @@ void sptlrpc_gc_add_sec(struct ptlrpc_sec *sec)
 
         sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
 
-        cfs_spin_lock(&sec_gc_list_lock);
-        cfs_list_add_tail(&sec_gc_list, &sec->ps_gc_list);
-        cfs_spin_unlock(&sec_gc_list_lock);
+       spin_lock(&sec_gc_list_lock);
+       cfs_list_add_tail(&sec_gc_list, &sec->ps_gc_list);
+       spin_unlock(&sec_gc_list_lock);
 
-        CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
+       CDEBUG(D_SEC, "added sec %p(%s)\n", sec, sec->ps_policy->sp_name);
 }
 EXPORT_SYMBOL(sptlrpc_gc_add_sec);
 
@@ -92,57 +92,57 @@ void sptlrpc_gc_del_sec(struct ptlrpc_sec *sec)
         /* signal before list_del to make iteration in gc thread safe */
         cfs_atomic_inc(&sec_gc_wait_del);
 
-        cfs_spin_lock(&sec_gc_list_lock);
-        cfs_list_del_init(&sec->ps_gc_list);
-        cfs_spin_unlock(&sec_gc_list_lock);
+       spin_lock(&sec_gc_list_lock);
+       cfs_list_del_init(&sec->ps_gc_list);
+       spin_unlock(&sec_gc_list_lock);
 
-        /* barrier */
-        cfs_mutex_lock(&sec_gc_mutex);
-        cfs_mutex_unlock(&sec_gc_mutex);
+       /* barrier */
+       mutex_lock(&sec_gc_mutex);
+       mutex_unlock(&sec_gc_mutex);
 
-        cfs_atomic_dec(&sec_gc_wait_del);
+       cfs_atomic_dec(&sec_gc_wait_del);
 
-        CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
+       CDEBUG(D_SEC, "del sec %p(%s)\n", sec, sec->ps_policy->sp_name);
 }
 EXPORT_SYMBOL(sptlrpc_gc_del_sec);
 
 void sptlrpc_gc_add_ctx(struct ptlrpc_cli_ctx *ctx)
 {
-        LASSERT(cfs_list_empty(&ctx->cc_gc_chain));
+       LASSERT(cfs_list_empty(&ctx->cc_gc_chain));
 
-        CDEBUG(D_SEC, "hand over ctx %p(%u->%s)\n",
-               ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
-        cfs_spin_lock(&sec_gc_ctx_list_lock);
-        cfs_list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
-        cfs_spin_unlock(&sec_gc_ctx_list_lock);
+       CDEBUG(D_SEC, "hand over ctx %p(%u->%s)\n",
+              ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+       spin_lock(&sec_gc_ctx_list_lock);
+       cfs_list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
+       spin_unlock(&sec_gc_ctx_list_lock);
 
-        thread_add_flags(&sec_gc_thread, SVC_SIGNAL);
-        cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
+       thread_add_flags(&sec_gc_thread, SVC_SIGNAL);
+       cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
 }
 EXPORT_SYMBOL(sptlrpc_gc_add_ctx);
 
 static void sec_process_ctx_list(void)
 {
-        struct ptlrpc_cli_ctx *ctx;
+       struct ptlrpc_cli_ctx *ctx;
 
-        cfs_spin_lock(&sec_gc_ctx_list_lock);
+       spin_lock(&sec_gc_ctx_list_lock);
 
-        while (!cfs_list_empty(&sec_gc_ctx_list)) {
-                ctx = cfs_list_entry(sec_gc_ctx_list.next,
-                                     struct ptlrpc_cli_ctx, cc_gc_chain);
-                cfs_list_del_init(&ctx->cc_gc_chain);
-                cfs_spin_unlock(&sec_gc_ctx_list_lock);
+       while (!cfs_list_empty(&sec_gc_ctx_list)) {
+               ctx = cfs_list_entry(sec_gc_ctx_list.next,
+                                    struct ptlrpc_cli_ctx, cc_gc_chain);
+               cfs_list_del_init(&ctx->cc_gc_chain);
+               spin_unlock(&sec_gc_ctx_list_lock);
 
-                LASSERT(ctx->cc_sec);
-                LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 1);
-                CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
-                       ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
-                sptlrpc_cli_ctx_put(ctx, 1);
+               LASSERT(ctx->cc_sec);
+               LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 1);
+               CDEBUG(D_SEC, "gc pick up ctx %p(%u->%s)\n",
+                      ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+               sptlrpc_cli_ctx_put(ctx, 1);
 
-                cfs_spin_lock(&sec_gc_ctx_list_lock);
-        }
+               spin_lock(&sec_gc_ctx_list_lock);
+       }
 
-        cfs_spin_unlock(&sec_gc_ctx_list_lock);
+       spin_unlock(&sec_gc_ctx_list_lock);
 }
 
 static void sec_do_gc(struct ptlrpc_sec *sec)
@@ -187,19 +187,19 @@ again:
                  * to trace each sec in order of expiry time.
                  * another issue here is that we wake up at a fixed interval
                  * instead of according to each sec's expiry time */
-                cfs_mutex_lock(&sec_gc_mutex);
+               mutex_lock(&sec_gc_mutex);
                 cfs_list_for_each_entry(sec, &sec_gc_list, ps_gc_list) {
                         /* if someone is waiting to be deleted, let it
                          * proceed as soon as possible. */
                         if (cfs_atomic_read(&sec_gc_wait_del)) {
                                 CDEBUG(D_SEC, "deletion pending, start over\n");
-                                cfs_mutex_unlock(&sec_gc_mutex);
+                               mutex_unlock(&sec_gc_mutex);
                                 goto again;
                         }
 
                         sec_do_gc(sec);
                 }
-                cfs_mutex_unlock(&sec_gc_mutex);
+               mutex_unlock(&sec_gc_mutex);
 
                 /* check ctx list again before sleep */
                 sec_process_ctx_list();
@@ -221,12 +221,12 @@ again:
 
 int sptlrpc_gc_init(void)
 {
-        struct l_wait_info lwi = { 0 };
-        int                rc;
+       struct l_wait_info lwi = { 0 };
+       int                rc;
 
-        cfs_mutex_init(&sec_gc_mutex);
-        cfs_spin_lock_init(&sec_gc_list_lock);
-        cfs_spin_lock_init(&sec_gc_ctx_list_lock);
+       mutex_init(&sec_gc_mutex);
+       spin_lock_init(&sec_gc_list_lock);
+       spin_lock_init(&sec_gc_ctx_list_lock);
 
         /* initialize thread control */
         memset(&sec_gc_thread, 0, sizeof(sec_gc_thread));
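
sptlrpc_gc_del_sec() above uses an empty mutex_lock()/mutex_unlock() pair as a barrier: the GC thread walks the list while holding sec_gc_mutex, so once the deleter has taken and released the same mutex, no walk can still be referencing the sec it just unlinked. A sketch of the idiom with hypothetical demo_* names:

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/spinlock.h>

    static DEFINE_MUTEX(demo_gc_mutex);         /* held across each GC walk */
    static DEFINE_SPINLOCK(demo_list_lock);     /* protects the list itself */

    static void demo_del_item(struct list_head *item)
    {
            spin_lock(&demo_list_lock);
            list_del_init(item);
            spin_unlock(&demo_list_lock);

            /* barrier: wait out any walk that still holds the mutex */
            mutex_lock(&demo_gc_mutex);
            mutex_unlock(&demo_gc_mutex);
    }
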
index f884574..5b51c18 100644 (file)
@@ -425,7 +425,7 @@ static void null_init_internal(void)
         null_sec.ps_flvr.sf_flags = 0;
         null_sec.ps_part = LUSTRE_SP_ANY;
         null_sec.ps_dying = 0;
-        cfs_spin_lock_init(&null_sec.ps_lock);
+       spin_lock_init(&null_sec.ps_lock);
         cfs_atomic_set(&null_sec.ps_nctx, 1);         /* for "null_cli_ctx" */
         CFS_INIT_LIST_HEAD(&null_sec.ps_gc_list);
         null_sec.ps_gc_interval = 0;
@@ -439,9 +439,9 @@ static void null_init_internal(void)
         null_cli_ctx.cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_ETERNAL |
                                 PTLRPC_CTX_UPTODATE;
         null_cli_ctx.cc_vcred.vc_uid = 0;
-        cfs_spin_lock_init(&null_cli_ctx.cc_lock);
-        CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
-        CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
+       spin_lock_init(&null_cli_ctx.cc_lock);
+       CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_req_list);
+       CFS_INIT_LIST_HEAD(&null_cli_ctx.cc_gc_chain);
 }
 
 int sptlrpc_null_init(void)
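
null_init_internal() above shows the other common shape in this patch: a statically allocated object whose embedded lock and list heads are still set up at runtime with spin_lock_init() and list-head initializers. A stripped-down sketch, with illustrative demo_* names:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct demo_ctx {
            spinlock_t              dc_lock;
            struct list_head        dc_req_list;
    };

    static struct demo_ctx demo_null_ctx;       /* static storage, zeroed */

    /* Run once before demo_null_ctx is used. */
    static void demo_init_internal(void)
    {
            spin_lock_init(&demo_null_ctx.dc_lock);
            INIT_LIST_HEAD(&demo_null_ctx.dc_req_list);
    }
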
index 10f88fa..d02e44e 100644 (file)
@@ -52,7 +52,7 @@
 
 struct plain_sec {
         struct ptlrpc_sec       pls_base;
-        cfs_rwlock_t            pls_lock;
+       rwlock_t                pls_lock;
         struct ptlrpc_cli_ctx  *pls_ctx;
 };
 
@@ -398,11 +398,11 @@ int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
 static
 struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
 {
-        struct ptlrpc_cli_ctx  *ctx, *ctx_new;
+       struct ptlrpc_cli_ctx  *ctx, *ctx_new;
 
-        OBD_ALLOC_PTR(ctx_new);
+       OBD_ALLOC_PTR(ctx_new);
 
-        cfs_write_lock(&plsec->pls_lock);
+       write_lock(&plsec->pls_lock);
 
         ctx = plsec->pls_ctx;
         if (ctx) {
@@ -419,7 +419,7 @@ struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
                 ctx->cc_expire = 0;
                 ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
                 ctx->cc_vcred.vc_uid = 0;
-                cfs_spin_lock_init(&ctx->cc_lock);
+               spin_lock_init(&ctx->cc_lock);
                 CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
                 CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
 
@@ -430,9 +430,9 @@ struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
                 cfs_atomic_inc(&ctx->cc_refcount); /* for caller */
         }
 
-        cfs_write_unlock(&plsec->pls_lock);
+       write_unlock(&plsec->pls_lock);
 
-        return ctx;
+       return ctx;
 }
 
 static
@@ -478,17 +478,17 @@ struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
         /*
          * initialize plain_sec
          */
-        cfs_rwlock_init(&plsec->pls_lock);
-        plsec->pls_ctx = NULL;
-
-        sec = &plsec->pls_base;
-        sec->ps_policy = &plain_policy;
-        cfs_atomic_set(&sec->ps_refcount, 0);
-        cfs_atomic_set(&sec->ps_nctx, 0);
-        sec->ps_id = sptlrpc_get_next_secid();
-        sec->ps_import = class_import_get(imp);
-        sec->ps_flvr = *sf;
-        cfs_spin_lock_init(&sec->ps_lock);
+       rwlock_init(&plsec->pls_lock);
+       plsec->pls_ctx = NULL;
+
+       sec = &plsec->pls_base;
+       sec->ps_policy = &plain_policy;
+       cfs_atomic_set(&sec->ps_refcount, 0);
+       cfs_atomic_set(&sec->ps_nctx, 0);
+       sec->ps_id = sptlrpc_get_next_secid();
+       sec->ps_import = class_import_get(imp);
+       sec->ps_flvr = *sf;
+       spin_lock_init(&sec->ps_lock);
         CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
         sec->ps_gc_interval = 0;
         sec->ps_gc_next = 0;
@@ -511,20 +511,20 @@ struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
                                         struct vfs_cred *vcred,
                                         int create, int remove_dead)
 {
-        struct plain_sec       *plsec = sec2plsec(sec);
-        struct ptlrpc_cli_ctx  *ctx;
-        ENTRY;
+       struct plain_sec       *plsec = sec2plsec(sec);
+       struct ptlrpc_cli_ctx  *ctx;
+       ENTRY;
 
-        cfs_read_lock(&plsec->pls_lock);
-        ctx = plsec->pls_ctx;
-        if (ctx)
-                cfs_atomic_inc(&ctx->cc_refcount);
-        cfs_read_unlock(&plsec->pls_lock);
+       read_lock(&plsec->pls_lock);
+       ctx = plsec->pls_ctx;
+       if (ctx)
+               cfs_atomic_inc(&ctx->cc_refcount);
+       read_unlock(&plsec->pls_lock);
 
-        if (unlikely(ctx == NULL))
-                ctx = plain_sec_install_ctx(plsec);
+       if (unlikely(ctx == NULL))
+               ctx = plain_sec_install_ctx(plsec);
 
-        RETURN(ctx);
+       RETURN(ctx);
 }
 
 static
@@ -554,10 +554,10 @@ int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
         if (uid != -1)
                 RETURN(0);
 
-        cfs_write_lock(&plsec->pls_lock);
+       write_lock(&plsec->pls_lock);
         ctx = plsec->pls_ctx;
         plsec->pls_ctx = NULL;
-        cfs_write_unlock(&plsec->pls_lock);
+       write_unlock(&plsec->pls_lock);
 
         if (ctx)
                 sptlrpc_cli_ctx_put(ctx, 1);
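
plain_lookup_ctx() and plain_sec_install_ctx() above implement a read-mostly cached pointer under pls_lock: the hot path takes only read_lock(), while installers take write_lock() and recheck, because two threads can race to install and the loser must adopt the winner's context. A self-contained sketch of the same pattern; the demo_* names are hypothetical, and the real code allocates with OBD_ALLOC_PTR() and reference-counts the context:

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(demo_pls_lock);
    static void *demo_cached_ctx;

    static void *demo_lookup_ctx(void)
    {
            void *ctx;

            read_lock(&demo_pls_lock);
            ctx = demo_cached_ctx;
            read_unlock(&demo_pls_lock);
            if (ctx)
                    return ctx;

            ctx = kzalloc(64, GFP_KERNEL);      /* may sleep: allocate unlocked */
            if (ctx == NULL)
                    return NULL;

            write_lock(&demo_pls_lock);
            if (demo_cached_ctx != NULL) {      /* lost the install race */
                    kfree(ctx);
                    ctx = demo_cached_ctx;
            } else {
                    demo_cached_ctx = ctx;
            }
            write_unlock(&demo_pls_lock);
            return ctx;
    }
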
index 97fead8..cd94b25 100644 (file)
@@ -68,7 +68,7 @@ static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req);
 static void ptlrpc_at_remove_timed(struct ptlrpc_request *req);
 
 static CFS_LIST_HEAD(ptlrpc_all_services);
-cfs_spinlock_t ptlrpc_all_services_lock;
+spinlock_t ptlrpc_all_services_lock;
 
 struct ptlrpc_request_buffer_desc *
 ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
@@ -92,10 +92,10 @@ ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
                return NULL;
        }
 
-       cfs_spin_lock(&svcpt->scp_lock);
+       spin_lock(&svcpt->scp_lock);
        cfs_list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
        svcpt->scp_nrqbds_total++;
-       cfs_spin_unlock(&svcpt->scp_lock);
+       spin_unlock(&svcpt->scp_lock);
 
        return rqbd;
 }
@@ -108,10 +108,10 @@ ptlrpc_free_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
        LASSERT(rqbd->rqbd_refcount == 0);
        LASSERT(cfs_list_empty(&rqbd->rqbd_reqs));
 
-       cfs_spin_lock(&svcpt->scp_lock);
+       spin_lock(&svcpt->scp_lock);
        cfs_list_del(&rqbd->rqbd_list);
        svcpt->scp_nrqbds_total--;
-       cfs_spin_unlock(&svcpt->scp_lock);
+       spin_unlock(&svcpt->scp_lock);
 
        OBD_FREE_LARGE(rqbd->rqbd_buffer, svcpt->scp_service->srv_buf_size);
        OBD_FREE_PTR(rqbd);
@@ -184,7 +184,7 @@ struct ptlrpc_hr_partition;
 
 struct ptlrpc_hr_thread {
        int                             hrt_id;         /* thread ID */
-       cfs_spinlock_t                  hrt_lock;
+       spinlock_t                      hrt_lock;
        cfs_waitq_t                     hrt_waitq;
        cfs_list_t                      hrt_queue;      /* RS queue */
        struct ptlrpc_hr_partition      *hrt_partition;
@@ -283,9 +283,9 @@ static void rs_batch_dispatch(struct rs_batch *b)
 
                hrt = ptlrpc_hr_select(b->rsb_svcpt);
 
-               cfs_spin_lock(&hrt->hrt_lock);
+               spin_lock(&hrt->hrt_lock);
                cfs_list_splice_init(&b->rsb_replies, &hrt->hrt_queue);
-               cfs_spin_unlock(&hrt->hrt_lock);
+               spin_unlock(&hrt->hrt_lock);
 
                cfs_waitq_signal(&hrt->hrt_waitq);
                b->rsb_n_replies = 0;
@@ -306,20 +306,20 @@ static void rs_batch_add(struct rs_batch *b, struct ptlrpc_reply_state *rs)
        if (svcpt != b->rsb_svcpt || b->rsb_n_replies >= MAX_SCHEDULED) {
                if (b->rsb_svcpt != NULL) {
                        rs_batch_dispatch(b);
-                       cfs_spin_unlock(&b->rsb_svcpt->scp_rep_lock);
+                       spin_unlock(&b->rsb_svcpt->scp_rep_lock);
                }
-               cfs_spin_lock(&svcpt->scp_rep_lock);
+               spin_lock(&svcpt->scp_rep_lock);
                b->rsb_svcpt = svcpt;
-        }
-        cfs_spin_lock(&rs->rs_lock);
-        rs->rs_scheduled_ever = 1;
-        if (rs->rs_scheduled == 0) {
-                cfs_list_move(&rs->rs_list, &b->rsb_replies);
-                rs->rs_scheduled = 1;
-                b->rsb_n_replies++;
-        }
-        rs->rs_committed = 1;
-        cfs_spin_unlock(&rs->rs_lock);
+       }
+       spin_lock(&rs->rs_lock);
+       rs->rs_scheduled_ever = 1;
+       if (rs->rs_scheduled == 0) {
+               cfs_list_move(&rs->rs_list, &b->rsb_replies);
+               rs->rs_scheduled = 1;
+               b->rsb_n_replies++;
+       }
+       rs->rs_committed = 1;
+       spin_unlock(&rs->rs_lock);
 }
 
 /**
@@ -333,7 +333,7 @@ static void rs_batch_fini(struct rs_batch *b)
 {
        if (b->rsb_svcpt != NULL) {
                rs_batch_dispatch(b);
-               cfs_spin_unlock(&b->rsb_svcpt->scp_rep_lock);
+               spin_unlock(&b->rsb_svcpt->scp_rep_lock);
        }
 }
 
@@ -362,9 +362,9 @@ void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
 
        hrt = ptlrpc_hr_select(rs->rs_svcpt);
 
-       cfs_spin_lock(&hrt->hrt_lock);
+       spin_lock(&hrt->hrt_lock);
        cfs_list_add_tail(&rs->rs_list, &hrt->hrt_queue);
-       cfs_spin_unlock(&hrt->hrt_lock);
+       spin_unlock(&hrt->hrt_lock);
 
        cfs_waitq_signal(&hrt->hrt_waitq);
        EXIT;
@@ -406,7 +406,7 @@ void ptlrpc_commit_replies(struct obd_export *exp)
          * to attend to complete them. */
 
         /* CAVEAT EMPTOR: spinlock ordering!!! */
-        cfs_spin_lock(&exp->exp_uncommitted_replies_lock);
+       spin_lock(&exp->exp_uncommitted_replies_lock);
         cfs_list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
                                      rs_obd_list) {
                 LASSERT (rs->rs_difficult);
@@ -417,9 +417,9 @@ void ptlrpc_commit_replies(struct obd_export *exp)
                         rs_batch_add(&batch, rs);
                 }
         }
-        cfs_spin_unlock(&exp->exp_uncommitted_replies_lock);
-        rs_batch_fini(&batch);
-        EXIT;
+       spin_unlock(&exp->exp_uncommitted_replies_lock);
+       rs_batch_fini(&batch);
+       EXIT;
 }
 EXPORT_SYMBOL(ptlrpc_commit_replies);
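
ptlrpc_commit_replies() above leans on the rs_batch helpers: replies are gathered onto a private list while scp_rep_lock is held, then handed to a reply-handling thread with a single hrt_lock round-trip and one wakeup, instead of one lock/wakeup per reply. A sketch of that splice-and-signal idea, with hypothetical demo_* names:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    static DEFINE_SPINLOCK(demo_hrt_lock);
    static LIST_HEAD(demo_hrt_queue);
    static DECLARE_WAIT_QUEUE_HEAD(demo_hrt_waitq);

    /* Hand a whole batch to the handler: one lock round-trip, one wakeup. */
    static void demo_dispatch_batch(struct list_head *batch)
    {
            spin_lock(&demo_hrt_lock);
            list_splice_init(batch, &demo_hrt_queue);
            spin_unlock(&demo_hrt_lock);

            wake_up(&demo_hrt_waitq);
    }
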
 
@@ -431,10 +431,10 @@ ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt)
        int                               posted = 0;
 
        for (;;) {
-               cfs_spin_lock(&svcpt->scp_lock);
+               spin_lock(&svcpt->scp_lock);
 
                if (cfs_list_empty(&svcpt->scp_rqbd_idle)) {
-                       cfs_spin_unlock(&svcpt->scp_lock);
+                       spin_unlock(&svcpt->scp_lock);
                        return posted;
                }
 
@@ -447,7 +447,7 @@ ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt)
                svcpt->scp_nrqbds_posted++;
                cfs_list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_posted);
 
-               cfs_spin_unlock(&svcpt->scp_lock);
+               spin_unlock(&svcpt->scp_lock);
 
                rc = ptlrpc_register_rqbd(rqbd);
                if (rc != 0)
@@ -456,7 +456,7 @@ ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt)
                posted = 1;
        }
 
-       cfs_spin_lock(&svcpt->scp_lock);
+       spin_lock(&svcpt->scp_lock);
 
        svcpt->scp_nrqbds_posted--;
        cfs_list_del(&rqbd->rqbd_list);
@@ -465,7 +465,7 @@ ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt)
        /* Don't complain if no request buffers are posted right now; LNET
         * won't drop requests because we set the portal lazy! */
 
-       cfs_spin_unlock(&svcpt->scp_lock);
+       spin_unlock(&svcpt->scp_lock);
 
        return -1;
 }
@@ -601,7 +601,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
        CFS_INIT_LIST_HEAD(&svcpt->scp_threads);
 
        /* rqbd and incoming request queue */
-       cfs_spin_lock_init(&svcpt->scp_lock);
+       spin_lock_init(&svcpt->scp_lock);
        CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
        CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
        CFS_INIT_LIST_HEAD(&svcpt->scp_req_incoming);
@@ -611,12 +611,12 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
        CFS_INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);
 
        /* active requests and hp requests */
-       cfs_spin_lock_init(&svcpt->scp_req_lock);
+       spin_lock_init(&svcpt->scp_req_lock);
        CFS_INIT_LIST_HEAD(&svcpt->scp_req_pending);
        CFS_INIT_LIST_HEAD(&svcpt->scp_hreq_pending);
 
        /* reply states */
-       cfs_spin_lock_init(&svcpt->scp_rep_lock);
+       spin_lock_init(&svcpt->scp_rep_lock);
        CFS_INIT_LIST_HEAD(&svcpt->scp_rep_active);
 #ifndef __KERNEL__
        CFS_INIT_LIST_HEAD(&svcpt->scp_rep_queue);
@@ -626,7 +626,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
        cfs_atomic_set(&svcpt->scp_nreps_difficult, 0);
 
        /* adaptive timeout */
-       cfs_spin_lock_init(&svcpt->scp_at_lock);
+       spin_lock_init(&svcpt->scp_at_lock);
        array = &svcpt->scp_at_array;
 
        size = at_est2timeout(at_max);
@@ -753,7 +753,7 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf,
                service->srv_cpt_bits++;
 
        /* public members */
-       cfs_spin_lock_init(&service->srv_lock);
+       spin_lock_init(&service->srv_lock);
        service->srv_name               = conf->psc_name;
        service->srv_watchdog_factor    = conf->psc_watchdog_factor;
        CFS_INIT_LIST_HEAD(&service->srv_list); /* for safety of cleanup */
@@ -800,9 +800,9 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf,
        rc = LNetSetLazyPortal(service->srv_req_portal);
        LASSERT(rc == 0);
 
-        cfs_spin_lock (&ptlrpc_all_services_lock);
+       spin_lock(&ptlrpc_all_services_lock);
         cfs_list_add (&service->srv_list, &ptlrpc_all_services);
-        cfs_spin_unlock (&ptlrpc_all_services_lock);
+       spin_unlock(&ptlrpc_all_services_lock);
 
         if (proc_entry != NULL)
                 ptlrpc_lprocfs_register_service(proc_entry, service);
@@ -866,12 +866,12 @@ void ptlrpc_server_drop_request(struct ptlrpc_request *req)
                 return;
 
        if (req->rq_at_linked) {
-               cfs_spin_lock(&svcpt->scp_at_lock);
+               spin_lock(&svcpt->scp_at_lock);
                /* recheck with lock, in case it's unlinked by
                 * ptlrpc_at_check_timed() */
                if (likely(req->rq_at_linked))
                        ptlrpc_at_remove_timed(req);
-               cfs_spin_unlock(&svcpt->scp_at_lock);
+               spin_unlock(&svcpt->scp_at_lock);
        }
 
        LASSERT(cfs_list_empty(&req->rq_timed_list));
@@ -882,7 +882,7 @@ void ptlrpc_server_drop_request(struct ptlrpc_request *req)
                 req->rq_export = NULL;
         }
 
-       cfs_spin_lock(&svcpt->scp_lock);
+       spin_lock(&svcpt->scp_lock);
 
         cfs_list_add(&req->rq_list, &rqbd->rqbd_reqs);
 
@@ -918,7 +918,7 @@ void ptlrpc_server_drop_request(struct ptlrpc_request *req)
                                cfs_list_del(&req->rq_history_list);
                        }
 
-                       cfs_spin_unlock(&svcpt->scp_lock);
+                       spin_unlock(&svcpt->scp_lock);
 
                         cfs_list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
                                 req = cfs_list_entry(rqbd->rqbd_reqs.next,
@@ -928,7 +928,7 @@ void ptlrpc_server_drop_request(struct ptlrpc_request *req)
                                 ptlrpc_server_free_request(req);
                         }
 
-                       cfs_spin_lock(&svcpt->scp_lock);
+                       spin_lock(&svcpt->scp_lock);
                        /*
                         * now all reqs including the embedded req has been
                         * disposed, schedule request buffer for re-use.
@@ -939,7 +939,7 @@ void ptlrpc_server_drop_request(struct ptlrpc_request *req)
                                          &svcpt->scp_rqbd_idle);
                }
 
-               cfs_spin_unlock(&svcpt->scp_lock);
+               spin_unlock(&svcpt->scp_lock);
        } else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) {
                /* If we are low on memory, we are not interested in history */
                cfs_list_del(&req->rq_list);
@@ -949,11 +949,11 @@ void ptlrpc_server_drop_request(struct ptlrpc_request *req)
                if (req->rq_history_seq > svcpt->scp_hist_seq_culled)
                        svcpt->scp_hist_seq_culled = req->rq_history_seq;
 
-               cfs_spin_unlock(&svcpt->scp_lock);
+               spin_unlock(&svcpt->scp_lock);
 
                ptlrpc_server_free_request(req);
        } else {
-               cfs_spin_unlock(&svcpt->scp_lock);
+               spin_unlock(&svcpt->scp_lock);
        }
 }
 
@@ -966,11 +966,11 @@ static void ptlrpc_server_finish_request(struct ptlrpc_service_part *svcpt,
 {
        ptlrpc_server_hpreq_fini(req);
 
-       cfs_spin_lock(&svcpt->scp_req_lock);
+       spin_lock(&svcpt->scp_req_lock);
        svcpt->scp_nreqs_active--;
        if (req->rq_hp)
                svcpt->scp_nhreqs_active--;
-       cfs_spin_unlock(&svcpt->scp_req_lock);
+       spin_unlock(&svcpt->scp_req_lock);
 
        ptlrpc_server_drop_request(req);
 }
@@ -1008,11 +1008,11 @@ static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay)
         /* exports may get disconnected from the chain even though the
            export has references, so we must keep the spin lock while
            manipulating the lists */
-        cfs_spin_lock(&exp->exp_obd->obd_dev_lock);
+       spin_lock(&exp->exp_obd->obd_dev_lock);
 
-        if (cfs_list_empty(&exp->exp_obd_chain_timed)) {
-                /* this one is not timed */
-                cfs_spin_unlock(&exp->exp_obd->obd_dev_lock);
+       if (cfs_list_empty(&exp->exp_obd_chain_timed)) {
+               /* this one is not timed */
+               spin_unlock(&exp->exp_obd->obd_dev_lock);
                 RETURN_EXIT;
         }
 
@@ -1022,7 +1022,7 @@ static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay)
         oldest_exp = cfs_list_entry(exp->exp_obd->obd_exports_timed.next,
                                     struct obd_export, exp_obd_chain_timed);
         oldest_time = oldest_exp->exp_last_request_time;
-        cfs_spin_unlock(&exp->exp_obd->obd_dev_lock);
+       spin_unlock(&exp->exp_obd->obd_dev_lock);
 
         if (exp->exp_obd->obd_recovering) {
                 /* be nice to everyone during recovery */
@@ -1144,7 +1144,7 @@ static int ptlrpc_at_add_timed(struct ptlrpc_request *req)
         if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0)
                 return(-ENOSYS);
 
-       cfs_spin_lock(&svcpt->scp_at_lock);
+       spin_lock(&svcpt->scp_at_lock);
         LASSERT(cfs_list_empty(&req->rq_timed_list));
 
         index = (unsigned long)req->rq_deadline % array->paa_size;
@@ -1167,17 +1167,17 @@ static int ptlrpc_at_add_timed(struct ptlrpc_request *req)
                 cfs_list_add(&req->rq_timed_list,
                              &array->paa_reqs_array[index]);
 
-        cfs_spin_lock(&req->rq_lock);
-        req->rq_at_linked = 1;
-        cfs_spin_unlock(&req->rq_lock);
-        req->rq_at_index = index;
-        array->paa_reqs_count[index]++;
-        array->paa_count++;
-        if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) {
-                array->paa_deadline = req->rq_deadline;
+       spin_lock(&req->rq_lock);
+       req->rq_at_linked = 1;
+       spin_unlock(&req->rq_lock);
+       req->rq_at_index = index;
+       array->paa_reqs_count[index]++;
+       array->paa_count++;
+       if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) {
+               array->paa_deadline = req->rq_deadline;
                ptlrpc_at_set_timer(svcpt);
        }
-       cfs_spin_unlock(&svcpt->scp_at_lock);
+       spin_unlock(&svcpt->scp_at_lock);
 
        return 0;
 }
@@ -1193,9 +1193,9 @@ ptlrpc_at_remove_timed(struct ptlrpc_request *req)
        LASSERT(!cfs_list_empty(&req->rq_timed_list));
        cfs_list_del_init(&req->rq_timed_list);
 
-       cfs_spin_lock(&req->rq_lock);
+       spin_lock(&req->rq_lock);
        req->rq_at_linked = 0;
-       cfs_spin_unlock(&req->rq_lock);
+       spin_unlock(&req->rq_lock);
 
        array->paa_reqs_count[req->rq_at_index]--;
        array->paa_count--;
@@ -1351,16 +1351,16 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
         int first, counter = 0;
         ENTRY;
 
-       cfs_spin_lock(&svcpt->scp_at_lock);
+       spin_lock(&svcpt->scp_at_lock);
        if (svcpt->scp_at_check == 0) {
-               cfs_spin_unlock(&svcpt->scp_at_lock);
+               spin_unlock(&svcpt->scp_at_lock);
                RETURN(0);
        }
        delay = cfs_time_sub(cfs_time_current(), svcpt->scp_at_checktime);
        svcpt->scp_at_check = 0;
 
        if (array->paa_count == 0) {
-               cfs_spin_unlock(&svcpt->scp_at_lock);
+               spin_unlock(&svcpt->scp_at_lock);
                RETURN(0);
        }
 
@@ -1369,7 +1369,7 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
        if (first > at_early_margin) {
                /* We've still got plenty of time.  Reset the timer. */
                ptlrpc_at_set_timer(svcpt);
-               cfs_spin_unlock(&svcpt->scp_at_lock);
+               spin_unlock(&svcpt->scp_at_lock);
                RETURN(0);
        }
 
@@ -1410,7 +1410,7 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
        /* we have a new earliest deadline, restart the timer */
        ptlrpc_at_set_timer(svcpt);
 
-       cfs_spin_unlock(&svcpt->scp_at_lock);
+       spin_unlock(&svcpt->scp_at_lock);
 
         CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early "
                "replies\n", first, at_extra, counter);
@@ -1465,13 +1465,13 @@ static int ptlrpc_server_hpreq_init(struct ptlrpc_service *svc,
                 if (req->rq_ops->hpreq_check)
                         rc = req->rq_ops->hpreq_check(req);
 
-                cfs_spin_lock_bh(&req->rq_export->exp_rpc_lock);
-                cfs_list_add(&req->rq_exp_list,
-                             &req->rq_export->exp_hp_rpcs);
-                cfs_spin_unlock_bh(&req->rq_export->exp_rpc_lock);
-        }
+               spin_lock_bh(&req->rq_export->exp_rpc_lock);
+               cfs_list_add(&req->rq_exp_list,
+                            &req->rq_export->exp_hp_rpcs);
+               spin_unlock_bh(&req->rq_export->exp_rpc_lock);
+       }
 
-        RETURN(rc);
+       RETURN(rc);
 }
 
 /** Remove the request from the export list. */
@@ -1484,11 +1484,11 @@ static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req)
                 if (req->rq_ops->hpreq_fini)
                         req->rq_ops->hpreq_fini(req);
 
-                cfs_spin_lock_bh(&req->rq_export->exp_rpc_lock);
-                cfs_list_del_init(&req->rq_exp_list);
-                cfs_spin_unlock_bh(&req->rq_export->exp_rpc_lock);
-        }
-        EXIT;
+               spin_lock_bh(&req->rq_export->exp_rpc_lock);
+               cfs_list_del_init(&req->rq_exp_list);
+               spin_unlock_bh(&req->rq_export->exp_rpc_lock);
+       }
+       EXIT;
 }
 
 static int ptlrpc_hpreq_check(struct ptlrpc_request *req)
@@ -1528,9 +1528,9 @@ EXPORT_SYMBOL(ptlrpc_hpreq_handler);
 static void ptlrpc_hpreq_reorder_nolock(struct ptlrpc_service_part *svcpt,
                                         struct ptlrpc_request *req)
 {
-        ENTRY;
+       ENTRY;
 
-        cfs_spin_lock(&req->rq_lock);
+       spin_lock(&req->rq_lock);
         if (req->rq_hp == 0) {
                 int opc = lustre_msg_get_opc(req->rq_reqmsg);
 
@@ -1540,8 +1540,8 @@ static void ptlrpc_hpreq_reorder_nolock(struct ptlrpc_service_part *svcpt,
                 if (opc != OBD_PING)
                         DEBUG_REQ(D_RPCTRACE, req, "high priority req");
         }
-        cfs_spin_unlock(&req->rq_lock);
-        EXIT;
+       spin_unlock(&req->rq_lock);
+       EXIT;
 }
 
 /**
@@ -1552,14 +1552,14 @@ void ptlrpc_hpreq_reorder(struct ptlrpc_request *req)
        struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
        ENTRY;
 
-       cfs_spin_lock(&svcpt->scp_req_lock);
+       spin_lock(&svcpt->scp_req_lock);
        /* It may happen that the request has already been taken for
         * processing but is still in the export list, or that it is not
         * in the request queue but is already in the export list; do not
         * add it to the HP list in either case. */
        if (!cfs_list_empty(&req->rq_list))
                ptlrpc_hpreq_reorder_nolock(svcpt, req);
-       cfs_spin_unlock(&svcpt->scp_req_lock);
+       spin_unlock(&svcpt->scp_req_lock);
        EXIT;
 }
 EXPORT_SYMBOL(ptlrpc_hpreq_reorder);
@@ -1578,14 +1578,14 @@ static int ptlrpc_server_request_add(struct ptlrpc_service_part *svcpt,
        if (rc < 0)
                RETURN(rc);
 
-       cfs_spin_lock(&svcpt->scp_req_lock);
+       spin_lock(&svcpt->scp_req_lock);
 
        if (rc)
                ptlrpc_hpreq_reorder_nolock(svcpt, req);
        else
                cfs_list_add_tail(&req->rq_list, &svcpt->scp_req_pending);
 
-       cfs_spin_unlock(&svcpt->scp_req_lock);
+       spin_unlock(&svcpt->scp_req_lock);
 
        RETURN(0);
 }
@@ -1728,9 +1728,9 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt)
        int                     rc;
        ENTRY;
 
-       cfs_spin_lock(&svcpt->scp_lock);
+       spin_lock(&svcpt->scp_lock);
        if (cfs_list_empty(&svcpt->scp_req_incoming)) {
-               cfs_spin_unlock(&svcpt->scp_lock);
+               spin_unlock(&svcpt->scp_lock);
                RETURN(0);
        }
 
@@ -1740,7 +1740,7 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt)
        svcpt->scp_nreqs_incoming--;
        /* Consider this still a "queued" request as far as stats are
         * concerned */
-       cfs_spin_unlock(&svcpt->scp_lock);
+       spin_unlock(&svcpt->scp_lock);
 
         /* go through security check/transform */
         rc = sptlrpc_svc_unwrap_request(req);
@@ -1855,9 +1855,9 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt)
 err_req:
        if (req->rq_export)
                class_export_rpc_put(req->rq_export);
-       cfs_spin_lock(&svcpt->scp_req_lock);
+       spin_lock(&svcpt->scp_req_lock);
        svcpt->scp_nreqs_active++;
-       cfs_spin_unlock(&svcpt->scp_req_lock);
+       spin_unlock(&svcpt->scp_req_lock);
        ptlrpc_server_finish_request(svcpt, req);
 
        RETURN(1);
@@ -1881,17 +1881,17 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
         int                    fail_opc = 0;
         ENTRY;
 
-       cfs_spin_lock(&svcpt->scp_req_lock);
+       spin_lock(&svcpt->scp_req_lock);
 #ifndef __KERNEL__
        /* !@%$# liblustre only has 1 thread */
        if (cfs_atomic_read(&svcpt->scp_nreps_difficult) != 0) {
-               cfs_spin_unlock(&svcpt->scp_req_lock);
+               spin_unlock(&svcpt->scp_req_lock);
                RETURN(0);
        }
 #endif
        request = ptlrpc_server_request_get(svcpt, 0);
        if  (request == NULL) {
-               cfs_spin_unlock(&svcpt->scp_req_lock);
+               spin_unlock(&svcpt->scp_req_lock);
                 RETURN(0);
         }
 
@@ -1902,14 +1902,14 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
 
         if (unlikely(fail_opc)) {
                 if (request->rq_export && request->rq_ops) {
-                       cfs_spin_unlock(&svcpt->scp_req_lock);
+                       spin_unlock(&svcpt->scp_req_lock);
 
                        OBD_FAIL_TIMEOUT(fail_opc, 4);
 
-                       cfs_spin_lock(&svcpt->scp_req_lock);
+                       spin_lock(&svcpt->scp_req_lock);
                        request = ptlrpc_server_request_get(svcpt, 0);
                        if  (request == NULL) {
-                               cfs_spin_unlock(&svcpt->scp_req_lock);
+                               spin_unlock(&svcpt->scp_req_lock);
                                RETURN(0);
                        }
                }
@@ -1920,7 +1920,7 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
        if (request->rq_hp)
                svcpt->scp_nhreqs_active++;
 
-       cfs_spin_unlock(&svcpt->scp_req_lock);
+       spin_unlock(&svcpt->scp_req_lock);
 
         ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);
 
@@ -2074,10 +2074,10 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
         LASSERT (rs->rs_scheduled);
         LASSERT (cfs_list_empty(&rs->rs_list));
 
-        cfs_spin_lock (&exp->exp_lock);
-        /* Noop if removed already */
-        cfs_list_del_init (&rs->rs_exp_list);
-        cfs_spin_unlock (&exp->exp_lock);
+       spin_lock(&exp->exp_lock);
+       /* Noop if removed already */
+       cfs_list_del_init (&rs->rs_exp_list);
+       spin_unlock(&exp->exp_lock);
 
         /* The disk commit callback holds exp_uncommitted_replies_lock while it
          * iterates over newly committed replies, removing them from
@@ -2101,13 +2101,13 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
          * holding rs_lock, we can be sure it has all completed once we hold
          * rs_lock, which we do right next.
          */
-        if (!rs->rs_committed) {
-                cfs_spin_lock(&exp->exp_uncommitted_replies_lock);
-                cfs_list_del_init(&rs->rs_obd_list);
-                cfs_spin_unlock(&exp->exp_uncommitted_replies_lock);
-        }
+       if (!rs->rs_committed) {
+               spin_lock(&exp->exp_uncommitted_replies_lock);
+               cfs_list_del_init(&rs->rs_obd_list);
+               spin_unlock(&exp->exp_uncommitted_replies_lock);
+       }
 
-        cfs_spin_lock(&rs->rs_lock);
+       spin_lock(&rs->rs_lock);
 
         been_handled = rs->rs_handled;
         rs->rs_handled = 1;
@@ -2126,26 +2126,25 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
         }
 
         if ((!been_handled && rs->rs_on_net) || nlocks > 0) {
-                cfs_spin_unlock(&rs->rs_lock);
+               spin_unlock(&rs->rs_lock);
 
-                if (!been_handled && rs->rs_on_net) {
-                        LNetMDUnlink(rs->rs_md_h);
-                        /* Ignore return code; we're racing with
-                         * completion... */
-                }
+               if (!been_handled && rs->rs_on_net) {
+                       LNetMDUnlink(rs->rs_md_h);
+                       /* Ignore return code; we're racing with completion */
+               }
 
-                while (nlocks-- > 0)
-                        ldlm_lock_decref(&rs->rs_locks[nlocks],
-                                         rs->rs_modes[nlocks]);
+               while (nlocks-- > 0)
+                       ldlm_lock_decref(&rs->rs_locks[nlocks],
+                                        rs->rs_modes[nlocks]);
 
-                cfs_spin_lock(&rs->rs_lock);
-        }
+               spin_lock(&rs->rs_lock);
+       }
 
-        rs->rs_scheduled = 0;
+       rs->rs_scheduled = 0;
 
-        if (!rs->rs_on_net) {
-                /* Off the net */
-                cfs_spin_unlock(&rs->rs_lock);
+       if (!rs->rs_on_net) {
+               /* Off the net */
+               spin_unlock(&rs->rs_lock);
 
                 class_export_put (exp);
                 rs->rs_export = NULL;
@@ -2157,7 +2156,7 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
        }
 
        /* still on the net; callback will schedule */
-       cfs_spin_unlock(&rs->rs_lock);
+       spin_unlock(&rs->rs_lock);
        RETURN(1);
 }
 
@@ -2177,14 +2176,14 @@ ptlrpc_server_handle_reply(struct ptlrpc_service_part *svcpt)
        struct ptlrpc_reply_state *rs = NULL;
        ENTRY;
 
-       cfs_spin_lock(&svcpt->scp_rep_lock);
+       spin_lock(&svcpt->scp_rep_lock);
        if (!cfs_list_empty(&svcpt->scp_rep_queue)) {
                rs = cfs_list_entry(svcpt->scp_rep_queue.prev,
                                    struct ptlrpc_reply_state,
                                    rs_list);
                cfs_list_del_init(&rs->rs_list);
        }
-       cfs_spin_unlock(&svcpt->scp_rep_lock);
+       spin_unlock(&svcpt->scp_rep_lock);
        if (rs != NULL)
                ptlrpc_handle_rs(rs);
        RETURN(rs != NULL);
@@ -2438,7 +2437,7 @@ static int ptlrpc_main(void *arg)
                 goto out_srv_fini;
         }
 
-       cfs_spin_lock(&svcpt->scp_lock);
+       spin_lock(&svcpt->scp_lock);
 
        LASSERT(thread_is_starting(thread));
        thread_clear_flags(thread, SVC_STARTING);
@@ -2452,7 +2451,7 @@ static int ptlrpc_main(void *arg)
         * we are now running, however we will exit as soon as possible */
        thread_add_flags(thread, SVC_RUNNING);
        svcpt->scp_nthrs_running++;
-       cfs_spin_unlock(&svcpt->scp_lock);
+       spin_unlock(&svcpt->scp_lock);
 
        /* wake up our creator in case he's still waiting. */
        cfs_waitq_signal(&thread->t_ctl_waitq);
@@ -2460,10 +2459,10 @@ static int ptlrpc_main(void *arg)
        thread->t_watchdog = lc_watchdog_add(ptlrpc_server_get_timeout(svcpt),
                                             NULL, NULL);
 
-       cfs_spin_lock(&svcpt->scp_rep_lock);
+       spin_lock(&svcpt->scp_rep_lock);
        cfs_list_add(&rs->rs_list, &svcpt->scp_rep_idle);
        cfs_waitq_signal(&svcpt->scp_rep_waitq);
-       cfs_spin_unlock(&svcpt->scp_rep_lock);
+       spin_unlock(&svcpt->scp_rep_lock);
 
        CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id,
               svcpt->scp_nthrs_running);
@@ -2527,7 +2526,7 @@ out:
         CDEBUG(D_RPCTRACE, "service thread [ %p : %u ] %d exiting: rc %d\n",
                thread, thread->t_pid, thread->t_id, rc);
 
-       cfs_spin_lock(&svcpt->scp_lock);
+       spin_lock(&svcpt->scp_lock);
        if (thread_test_and_clear_flags(thread, SVC_STARTING))
                svcpt->scp_nthrs_starting--;
 
@@ -2540,7 +2539,7 @@ out:
        thread_add_flags(thread, SVC_STOPPED);
 
        cfs_waitq_signal(&thread->t_ctl_waitq);
-       cfs_spin_unlock(&svcpt->scp_lock);
+       spin_unlock(&svcpt->scp_lock);
 
        return rc;
 }
@@ -2550,12 +2549,12 @@ static int hrt_dont_sleep(struct ptlrpc_hr_thread *hrt,
 {
        int result;
 
-       cfs_spin_lock(&hrt->hrt_lock);
+       spin_lock(&hrt->hrt_lock);
 
        cfs_list_splice_init(&hrt->hrt_queue, replies);
        result = ptlrpc_hr.hr_stopping || !cfs_list_empty(replies);
 
-       cfs_spin_unlock(&hrt->hrt_lock);
+       spin_unlock(&hrt->hrt_lock);
        return result;
 }
 
@@ -2669,7 +2668,7 @@ static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
        CDEBUG(D_INFO, "Stopping threads for service %s\n",
               svcpt->scp_service->srv_name);
 
-       cfs_spin_lock(&svcpt->scp_lock);
+       spin_lock(&svcpt->scp_lock);
        /* let the thread know that we would like it to stop asap */
        list_for_each_entry(thread, &svcpt->scp_threads, t_link) {
                CDEBUG(D_INFO, "Stopping thread %s #%u\n",
@@ -2687,17 +2686,17 @@ static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
                        cfs_list_add(&thread->t_link, &zombie);
                        continue;
                }
-               cfs_spin_unlock(&svcpt->scp_lock);
+               spin_unlock(&svcpt->scp_lock);
 
                CDEBUG(D_INFO, "waiting for stopping-thread %s #%u\n",
                       svcpt->scp_service->srv_thread_name, thread->t_id);
                l_wait_event(thread->t_ctl_waitq,
                             thread_is_stopped(thread), &lwi);
 
-               cfs_spin_lock(&svcpt->scp_lock);
+               spin_lock(&svcpt->scp_lock);
        }
 
-       cfs_spin_unlock(&svcpt->scp_lock);
+       spin_unlock(&svcpt->scp_lock);
 
        while (!cfs_list_empty(&zombie)) {
                thread = cfs_list_entry(zombie.next,
@@ -2786,9 +2785,9 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
                RETURN(-ENOMEM);
        cfs_waitq_init(&thread->t_ctl_waitq);
 
-       cfs_spin_lock(&svcpt->scp_lock);
+       spin_lock(&svcpt->scp_lock);
        if (!ptlrpc_threads_increasable(svcpt)) {
-               cfs_spin_unlock(&svcpt->scp_lock);
+               spin_unlock(&svcpt->scp_lock);
                OBD_FREE_PTR(thread);
                RETURN(-EMFILE);
        }
@@ -2797,7 +2796,7 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
                /* serialize starting because some modules (obdfilter)
                 * might require unique and contiguous t_id */
                LASSERT(svcpt->scp_nthrs_starting == 1);
-               cfs_spin_unlock(&svcpt->scp_lock);
+               spin_unlock(&svcpt->scp_lock);
                OBD_FREE_PTR(thread);
                if (wait) {
                        CDEBUG(D_INFO, "Waiting for creating thread %s #%d\n",
@@ -2817,7 +2816,7 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
        thread->t_svcpt = svcpt;
 
        cfs_list_add(&thread->t_link, &svcpt->scp_threads);
-       cfs_spin_unlock(&svcpt->scp_lock);
+       spin_unlock(&svcpt->scp_lock);
 
        if (svcpt->scp_cpt >= 0) {
                snprintf(thread->t_name, PTLRPC_THR_NAME_LEN, "%s%02d_%03d",
@@ -2836,10 +2835,10 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
        if (rc < 0) {
                CERROR("cannot start thread '%s': rc %d\n",
                       thread->t_name, rc);
-               cfs_spin_lock(&svcpt->scp_lock);
+               spin_lock(&svcpt->scp_lock);
                cfs_list_del(&thread->t_link);
                --svcpt->scp_nthrs_starting;
-               cfs_spin_unlock(&svcpt->scp_lock);
+               spin_unlock(&svcpt->scp_lock);
 
                 OBD_FREE(thread, sizeof(*thread));
                 RETURN(rc);
@@ -2896,7 +2895,7 @@ int ptlrpc_hr_init(void)
                        hrt->hrt_id = j;
                        hrt->hrt_partition = hrp;
                        cfs_waitq_init(&hrt->hrt_waitq);
-                       cfs_spin_lock_init(&hrt->hrt_lock);
+                       spin_lock_init(&hrt->hrt_lock);
                        CFS_INIT_LIST_HEAD(&hrt->hrt_queue);
                }
        }
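
For locks embedded in dynamically allocated structures such as hrt_lock above, the static DEFINE_SPINLOCK() initializer does not apply; spin_lock_init() is called at setup time instead. A minimal sketch under that assumption (hypothetical hr_demo names):

#include <linux/slab.h>
#include <linux/spinlock.h>

struct hr_demo {				/* stand-in for a per-thread state struct */
	spinlock_t	hd_lock;
	int		hd_queue_len;
};

static struct hr_demo *hr_demo_alloc(void)
{
	struct hr_demo *hrd = kzalloc(sizeof(*hrd), GFP_KERNEL);

	if (hrd != NULL)
		spin_lock_init(&hrd->hd_lock);	/* was cfs_spin_lock_init() */
	return hrd;
}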
@@ -2998,9 +2997,9 @@ ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc)
 
                /* Wait for the network to release any buffers
                 * it's currently filling */
-               cfs_spin_lock(&svcpt->scp_lock);
+               spin_lock(&svcpt->scp_lock);
                while (svcpt->scp_nrqbds_posted != 0) {
-                       cfs_spin_unlock(&svcpt->scp_lock);
+                       spin_unlock(&svcpt->scp_lock);
                        /* Network access will complete in finite time but
                         * the HUGE timeout lets us CWARN for visibility
                         * of sluggish NALs */
@@ -3014,9 +3013,9 @@ ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc)
                                      "request buffers\n",
                                      svcpt->scp_service->srv_name);
                        }
-                       cfs_spin_lock(&svcpt->scp_lock);
+                       spin_lock(&svcpt->scp_lock);
                }
-               cfs_spin_unlock(&svcpt->scp_lock);
+               spin_unlock(&svcpt->scp_lock);
        }
 }
 
@@ -3033,15 +3032,15 @@ ptlrpc_service_purge_all(struct ptlrpc_service *svc)
                if (svcpt->scp_service == NULL)
                        break;
 
-               cfs_spin_lock(&svcpt->scp_rep_lock);
+               spin_lock(&svcpt->scp_rep_lock);
                while (!cfs_list_empty(&svcpt->scp_rep_active)) {
                        rs = cfs_list_entry(svcpt->scp_rep_active.next,
                                            struct ptlrpc_reply_state, rs_list);
-                       cfs_spin_lock(&rs->rs_lock);
+                       spin_lock(&rs->rs_lock);
                        ptlrpc_schedule_difficult_reply(rs);
-                       cfs_spin_unlock(&rs->rs_lock);
+                       spin_unlock(&rs->rs_lock);
                }
-               cfs_spin_unlock(&svcpt->scp_rep_lock);
+               spin_unlock(&svcpt->scp_rep_lock);
 
                /* purge the request queue.  NB No new replies (rqbds
                 * all unlinked) and no service threads, so I'm the only
@@ -3141,9 +3140,9 @@ int ptlrpc_unregister_service(struct ptlrpc_service *service)
 
        service->srv_is_stopping = 1;
 
-       cfs_spin_lock(&ptlrpc_all_services_lock);
+       spin_lock(&ptlrpc_all_services_lock);
        cfs_list_del_init(&service->srv_list);
-       cfs_spin_unlock(&ptlrpc_all_services_lock);
+       spin_unlock(&ptlrpc_all_services_lock);
 
        ptlrpc_lprocfs_unregister_service(service);
 
@@ -3172,9 +3171,9 @@ int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
 
        cfs_gettimeofday(&right_now);
 
-       cfs_spin_lock(&svcpt->scp_req_lock);
+       spin_lock(&svcpt->scp_req_lock);
        if (!ptlrpc_server_request_pending(svcpt, 1)) {
-               cfs_spin_unlock(&svcpt->scp_req_lock);
+               spin_unlock(&svcpt->scp_req_lock);
                return 0;
        }
 
@@ -3188,7 +3187,7 @@ int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
        }
 
        timediff = cfs_timeval_sub(&right_now, &request->rq_arrival_time, NULL);
-       cfs_spin_unlock(&svcpt->scp_req_lock);
+       spin_unlock(&svcpt->scp_req_lock);
 
        if ((timediff / ONE_MILLION) >
            (AT_OFF ? obd_timeout * 3 / 2 : at_max)) {
index 990b45b..29bdd1b 100644 (file)
@@ -85,7 +85,7 @@ struct lquota_mst_entry {
 
        /* r/w semaphore used to protect concurrent access to the quota
         * parameters which are stored on disk */
-       cfs_rw_semaphore_t      lme_sem;
+       struct rw_semaphore     lme_sem;
 
        /* quota space that may be released after glimpse */
        __u64                   lme_may_rel;
@@ -113,7 +113,7 @@ struct lquota_slv_entry {
        unsigned int            lse_pending_req;
 
        /* rw spinlock protecting in-memory counters (i.e. lse_pending*) */
-       cfs_rwlock_t            lse_lock;
+       rwlock_t                lse_lock;
 
        /* waiter for pending request done */
        cfs_waitq_t             lse_waiters;
@@ -236,33 +236,33 @@ static inline int lqe_is_master(struct lquota_entry *lqe)
 static inline void lqe_write_lock(struct lquota_entry *lqe)
 {
        if (lqe_is_master(lqe))
-               cfs_down_write(&lqe->lqe_sem);
+               down_write(&lqe->lqe_sem);
        else
-               cfs_write_lock(&lqe->lqe_lock);
+               write_lock(&lqe->lqe_lock);
 }
 
 static inline void lqe_write_unlock(struct lquota_entry *lqe)
 {
        if (lqe_is_master(lqe))
-               cfs_up_write(&lqe->lqe_sem);
+               up_write(&lqe->lqe_sem);
        else
-               cfs_write_unlock(&lqe->lqe_lock);
+               write_unlock(&lqe->lqe_lock);
 }
 
 static inline void lqe_read_lock(struct lquota_entry *lqe)
 {
        if (lqe_is_master(lqe))
-               cfs_down_read(&lqe->lqe_sem);
+               down_read(&lqe->lqe_sem);
        else
-               cfs_read_lock(&lqe->lqe_lock);
+               read_lock(&lqe->lqe_lock);
 }
 
 static inline void lqe_read_unlock(struct lquota_entry *lqe)
 {
        if (lqe_is_master(lqe))
-               cfs_up_read(&lqe->lqe_sem);
+               up_read(&lqe->lqe_sem);
        else
-               cfs_read_unlock(&lqe->lqe_lock);
+               read_unlock(&lqe->lqe_lock);
 }
 
 /*
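
These helpers pick between two different primitives: on the master the entry is protected by a sleeping rw_semaphore (the lock holder may block on disk I/O), while on the slave a non-sleeping rwlock_t suffices for short in-memory updates. A condensed sketch of the same dual-lock pattern (hypothetical entry_demo names):

#include <linux/rwsem.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct entry_demo {
	bool			ed_is_master;
	struct rw_semaphore	ed_sem;		/* master: holder may sleep */
	rwlock_t		ed_lock;	/* slave: short, atomic-context updates */
};

static void entry_init(struct entry_demo *e, bool is_master)
{
	e->ed_is_master = is_master;
	if (is_master)
		init_rwsem(&e->ed_sem);		/* was cfs_init_rwsem() */
	else
		rwlock_init(&e->ed_lock);	/* was cfs_rwlock_init() */
}

static void entry_read_lock(struct entry_demo *e)
{
	if (e->ed_is_master)
		down_read(&e->ed_sem);		/* was cfs_down_read() */
	else
		read_lock(&e->ed_lock);		/* was cfs_read_lock() */
}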
index da01c51..3563aeb 100644 (file)
@@ -242,7 +242,7 @@ static int qmt_device_init0(const struct lu_env *env, struct qmt_device *qmt,
        thread_set_flags(&qmt->qmt_reba_thread, SVC_STOPPED);
        cfs_waitq_init(&qmt->qmt_reba_thread.t_ctl_waitq);
        CFS_INIT_LIST_HEAD(&qmt->qmt_reba_list);
-       cfs_spin_lock_init(&qmt->qmt_reba_lock);
+       spin_lock_init(&qmt->qmt_reba_lock);
        rc = qmt_start_reba_thread(qmt);
        if (rc) {
                CERROR("%s: failed to start rebalance thread (%d)\n",
index fa47f52..9c6b9a3 100644 (file)
@@ -47,7 +47,7 @@ static void qmt_lqe_init(struct lquota_entry *lqe, void *arg)
        LASSERT(lqe_is_master(lqe));
 
        lqe->lqe_revoke_time = 0;
-       cfs_init_rwsem(&lqe->lqe_sem);
+       init_rwsem(&lqe->lqe_sem);
 }
 
 /*
index 7cad493..d85bebe 100644 (file)
@@ -80,7 +80,7 @@ struct qmt_device {
        cfs_list_t               qmt_reba_list;
 
        /* lock protecting rebalancing list */
-       cfs_spinlock_t           qmt_reba_lock;
+       spinlock_t               qmt_reba_lock;
 
        unsigned long            qmt_stopping:1; /* qmt is stopping */
 
@@ -156,7 +156,7 @@ static inline struct qmt_pool_info *lqe2qpi(struct lquota_entry *lqe)
 static inline bool lqe_is_locked(struct lquota_entry *lqe)
 {
        LASSERT(lqe_is_master(lqe));
-       if (cfs_down_write_trylock(&lqe->lqe_sem) == 0)
+       if (down_write_trylock(&lqe->lqe_sem) == 0)
                return true;
        lqe_write_unlock(lqe);
        return false;
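
The trylock conversion above relies on down_write_trylock() returning nonzero when the write lock was acquired and 0 when it is already held, so a 0 return here means some other thread holds lqe_sem. A minimal sketch of the probe-and-release idiom (hypothetical name; the real helper releases through lqe_write_unlock()):

#include <linux/rwsem.h>
#include <linux/types.h>

static bool demo_is_locked(struct rw_semaphore *sem)
{
	/* down_write_trylock(): 1 on success, 0 if the rwsem is held */
	if (down_write_trylock(sem) == 0)
		return true;	/* contended: somebody else holds it */
	up_write(sem);		/* we took it; drop it and report unlocked */
	return false;
}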
index 3067ce7..8b7abb6 100644 (file)
@@ -678,12 +678,12 @@ void qmt_id_lock_notify(struct qmt_device *qmt, struct lquota_entry *lqe)
        ENTRY;
 
        lqe_getref(lqe);
-       cfs_spin_lock(&qmt->qmt_reba_lock);
+       spin_lock(&qmt->qmt_reba_lock);
        if (!qmt->qmt_stopping && cfs_list_empty(&lqe->lqe_link)) {
                cfs_list_add_tail(&lqe->lqe_link, &qmt->qmt_reba_list);
                added = true;
        }
-       cfs_spin_unlock(&qmt->qmt_reba_lock);
+       spin_unlock(&qmt->qmt_reba_lock);
 
        if (added)
                cfs_waitq_signal(&qmt->qmt_reba_thread.t_ctl_waitq);
@@ -735,19 +735,19 @@ static int qmt_reba_thread(void *arg)
                             !cfs_list_empty(&qmt->qmt_reba_list) ||
                             !thread_is_running(thread), &lwi);
 
-               cfs_spin_lock(&qmt->qmt_reba_lock);
+               spin_lock(&qmt->qmt_reba_lock);
                cfs_list_for_each_entry_safe(lqe, tmp, &qmt->qmt_reba_list,
                                             lqe_link) {
                        cfs_list_del_init(&lqe->lqe_link);
-                       cfs_spin_unlock(&qmt->qmt_reba_lock);
+                       spin_unlock(&qmt->qmt_reba_lock);
 
                        if (thread_is_running(thread))
                                qmt_id_lock_glimpse(env, qmt, lqe, NULL);
 
                        lqe_putref(lqe);
-                       cfs_spin_lock(&qmt->qmt_reba_lock);
+                       spin_lock(&qmt->qmt_reba_lock);
                }
-               cfs_spin_unlock(&qmt->qmt_reba_lock);
+               spin_unlock(&qmt->qmt_reba_lock);
 
                if (!thread_is_running(thread))
                        break;
index 0ad0d64..489034b 100644 (file)
@@ -53,7 +53,7 @@ void qsd_put_fsinfo(struct qsd_fsinfo *qfs)
        ENTRY;
        LASSERT(qfs != NULL);
 
-       cfs_spin_lock(&qfs_list_lock);
+       spin_lock(&qfs_list_lock);
        LASSERT(qfs->qfs_ref > 0);
        qfs->qfs_ref--;
        if (qfs->qfs_ref == 0) {
@@ -61,7 +61,7 @@ void qsd_put_fsinfo(struct qsd_fsinfo *qfs)
                cfs_list_del(&qfs->qfs_link);
                OBD_FREE_PTR(qfs);
        }
-       cfs_spin_unlock(&qfs_list_lock);
+       spin_unlock(&qfs_list_lock);
        EXIT;
 }
 
@@ -92,14 +92,14 @@ struct qsd_fsinfo *qsd_get_fsinfo(char *name, bool create)
                if (new == NULL)
                        RETURN(NULL);
 
-               cfs_sema_init(&new->qfs_sem, 1);
+               sema_init(&new->qfs_sem, 1);
                CFS_INIT_LIST_HEAD(&new->qfs_qsd_list);
                strcpy(new->qfs_name, name);
                new->qfs_ref = 1;
        }
 
        /* search in the fsinfo list */
-       cfs_spin_lock(&qfs_list_lock);
+       spin_lock(&qfs_list_lock);
        cfs_list_for_each_entry(qfs, &qfs_list, qfs_link) {
                if (!strcmp(qfs->qfs_name, name)) {
                        qfs->qfs_ref++;
@@ -116,7 +116,7 @@ struct qsd_fsinfo *qsd_get_fsinfo(char *name, bool create)
                new = NULL;
        }
 out:
-       cfs_spin_unlock(&qfs_list_lock);
+       spin_unlock(&qfs_list_lock);
 
        if (new)
                OBD_FREE_PTR(new);
@@ -164,7 +164,7 @@ int qsd_process_config(struct lustre_cfg *lcfg)
        if (strchr(valstr, 'g'))
                enabled |= 1 << GRPQUOTA;
 
-       cfs_down(&qfs->qfs_sem);
+       down(&qfs->qfs_sem);
        if (qfs->qfs_enabled[pool - LQUOTA_FIRST_RES] == enabled)
                /* no change required */
                GOTO(out, rc = 0);
@@ -185,10 +185,10 @@ int qsd_process_config(struct lustre_cfg *lcfg)
 
                        /* start reintegration only if qsd_prepare() was
                         * successfully called */
-                       cfs_read_lock(&qsd->qsd_lock);
+                       read_lock(&qsd->qsd_lock);
                        if (!qsd->qsd_prepared)
                                skip = true;
-                       cfs_read_unlock(&qsd->qsd_lock);
+                       read_unlock(&qsd->qsd_lock);
                        if (skip)
                                continue;
 
@@ -199,7 +199,7 @@ int qsd_process_config(struct lustre_cfg *lcfg)
                }
        }
 out:
-       cfs_up(&qfs->qfs_sem);
+       up(&qfs->qfs_sem);
        qsd_put_fsinfo(qfs);
        RETURN(0);
 }
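
qfs_sem is a counting semaphore initialized to 1, i.e. it is used as a sleeping mutex around the per-filesystem quota configuration. A minimal sketch of the converted API (hypothetical cfg_* names):

#include <linux/semaphore.h>

static struct semaphore cfg_sem;

static int __init cfg_demo_init(void)
{
	sema_init(&cfg_sem, 1);	/* count of 1 => mutual exclusion; was cfs_sema_init() */
	return 0;
}

static void cfg_update(void)
{
	down(&cfg_sem);		/* may sleep, uninterruptible; was cfs_down() */
	/* ... modify configuration under the semaphore ... */
	up(&cfg_sem);		/* was cfs_up() */
}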
index 993c1e3..4dbbeb0 100644 (file)
@@ -47,7 +47,7 @@ static void qsd_lqe_init(struct lquota_entry *lqe, void *arg)
        LASSERT(!lqe_is_master(lqe));
 
        /* initialize slave parameters */
-       cfs_rwlock_init(&lqe->lqe_lock);
+       rwlock_init(&lqe->lqe_lock);
        memset(&lqe->lqe_lockh, 0, sizeof(lqe->lqe_lockh));
        lqe->lqe_pending_write = 0;
        lqe->lqe_pending_req   = 0;
index 01b3e54..3b87ddb 100644 (file)
@@ -94,10 +94,10 @@ static int qsd_ready(struct lquota_entry *lqe, struct lustre_handle *lockh)
        struct ldlm_lock        *lock;
        ENTRY;
 
-       cfs_read_lock(&qsd->qsd_lock);
+       read_lock(&qsd->qsd_lock);
        /* is the qsd about to shut down? */
        if (qsd->qsd_stopping) {
-               cfs_read_unlock(&qsd->qsd_lock);
+               read_unlock(&qsd->qsd_lock);
                LQUOTA_DEBUG(lqe, "dropping quota req since qsd is stopping");
                /* Target is about to shut down, client will retry */
                RETURN(-EINPROGRESS);
@@ -107,7 +107,7 @@ static int qsd_ready(struct lquota_entry *lqe, struct lustre_handle *lockh)
        if (qsd->qsd_exp_valid)
                imp = class_exp2cliimp(qsd->qsd_exp);
        if (imp == NULL || imp->imp_invalid) {
-               cfs_read_unlock(&qsd->qsd_lock);
+               read_unlock(&qsd->qsd_lock);
                LQUOTA_DEBUG(lqe, "connection to master not ready");
                RETURN(-ENOTCONN);
        }
@@ -120,7 +120,7 @@ static int qsd_ready(struct lquota_entry *lqe, struct lustre_handle *lockh)
         * If the previous reintegration failed for some reason, we'll
         * re-trigger it here as well. */
        if (!qqi->qqi_glb_uptodate || !qqi->qqi_slv_uptodate) {
-               cfs_read_unlock(&qsd->qsd_lock);
+               read_unlock(&qsd->qsd_lock);
                LQUOTA_DEBUG(lqe, "not up-to-date, dropping request and "
                             "kicking off reintegration");
                qsd_start_reint_thread(qqi);
@@ -130,7 +130,7 @@ static int qsd_ready(struct lquota_entry *lqe, struct lustre_handle *lockh)
        /* Fill the remote global lock handle, master will check this handle
         * to see if the slave is sending request with stale lock */
        lustre_handle_copy(lockh, &qqi->qqi_lockh);
-       cfs_read_unlock(&qsd->qsd_lock);
+       read_unlock(&qsd->qsd_lock);
 
        if (!lustre_handle_is_used(lockh))
                RETURN(-ENOLCK);
@@ -835,12 +835,12 @@ int qsd_op_begin(const struct lu_env *env, struct qsd_instance *qsd,
                RETURN(0);
 
        /* We don't enforce quota until the qsd_instance is started */
-       cfs_read_lock(&qsd->qsd_lock);
+       read_lock(&qsd->qsd_lock);
        if (!qsd->qsd_started) {
-               cfs_read_unlock(&qsd->qsd_lock);
+               read_unlock(&qsd->qsd_lock);
                RETURN(0);
        }
-       cfs_read_unlock(&qsd->qsd_lock);
+       read_unlock(&qsd->qsd_lock);
 
        /* ignore block quota on MDTs, ignore inode quota on OSTs */
        if ((!qsd->qsd_is_md && !qi->lqi_is_blk) ||
@@ -1075,12 +1075,12 @@ void qsd_op_end(const struct lu_env *env, struct qsd_instance *qsd,
                RETURN_EXIT;
 
        /* We don't enforce quota until the qsd_instance is started */
-       cfs_read_lock(&qsd->qsd_lock);
+       read_lock(&qsd->qsd_lock);
        if (!qsd->qsd_started) {
-               cfs_read_unlock(&qsd->qsd_lock);
+               read_unlock(&qsd->qsd_lock);
                RETURN_EXIT;
        }
-       cfs_read_unlock(&qsd->qsd_lock);
+       read_unlock(&qsd->qsd_lock);
 
        LASSERT(trans != NULL);
 
@@ -1127,12 +1127,12 @@ void qsd_op_adjust(const struct lu_env *env, struct qsd_instance *qsd,
                RETURN_EXIT;
 
        /* We don't enforce quota until the qsd_instance is started */
-       cfs_read_lock(&qsd->qsd_lock);
+       read_lock(&qsd->qsd_lock);
        if (!qsd->qsd_started) {
-               cfs_read_unlock(&qsd->qsd_lock);
+               read_unlock(&qsd->qsd_lock);
                RETURN_EXIT;
        }
-       cfs_read_unlock(&qsd->qsd_lock);
+       read_unlock(&qsd->qsd_lock);
 
        qqi = qsd->qsd_type_array[qtype];
        LASSERT(qqi);
@@ -1141,12 +1141,12 @@ void qsd_op_adjust(const struct lu_env *env, struct qsd_instance *qsd,
            qid->qid_uid == 0)
                RETURN_EXIT;
 
-       cfs_read_lock(&qsd->qsd_lock);
+       read_lock(&qsd->qsd_lock);
        if (!qsd->qsd_started) {
-               cfs_read_unlock(&qsd->qsd_lock);
+               read_unlock(&qsd->qsd_lock);
                RETURN_EXIT;
        }
-       cfs_read_unlock(&qsd->qsd_lock);
+       read_unlock(&qsd->qsd_lock);
 
        lqe = lqe_locate(env, qqi->qqi_site, qid);
        if (IS_ERR(lqe)) {
index c640a69..3f9f1e4 100644 (file)
@@ -82,7 +82,7 @@ struct qsd_instance {
        cfs_list_t               qsd_adjust_list;
 
        /* lock protecting adjust list */
-       cfs_spinlock_t           qsd_adjust_lock;
+       spinlock_t               qsd_adjust_lock;
 
        /* dedicated thread for updating slave index files. */
        struct ptlrpc_thread     qsd_upd_thread;
@@ -95,7 +95,7 @@ struct qsd_instance {
         * - the qsd update list
         * - the deferred list
         * - flags of the qsd_qtype_info */
-       cfs_rwlock_t             qsd_lock;
+       rwlock_t                 qsd_lock;
 
        /* Default quota settings which apply to all identifiers */
        /* when blk qunit reaches this value, later write reqs from client
@@ -190,7 +190,7 @@ struct qsd_fsinfo {
 
        /* list of all qsd_instance for this fs */
        cfs_list_t              qfs_qsd_list;
-       cfs_semaphore_t         qfs_sem;
+       struct semaphore        qfs_sem;
 
        /* link to the global quota fsinfo list.  */
        cfs_list_t              qfs_link;
index 64c537e..3a6ca61 100644 (file)
@@ -146,7 +146,7 @@ static int lprocfs_qsd_wr_force_reint(struct file *file, const char *buffer,
 
        LASSERT(qsd != NULL);
 
-       cfs_write_lock(&qsd->qsd_lock);
+       write_lock(&qsd->qsd_lock);
        if (qsd->qsd_stopping) {
                /* don't mess with the shutdown procedure, it is already
                 * complicated enough */
@@ -160,7 +160,7 @@ static int lprocfs_qsd_wr_force_reint(struct file *file, const char *buffer,
                        qsd->qsd_type_array[qtype]->qqi_slv_uptodate = false;
                }
        }
-       cfs_write_unlock(&qsd->qsd_lock);
+       write_unlock(&qsd->qsd_lock);
 
        if (rc)
                return rc;
@@ -229,10 +229,10 @@ static int qsd_conn_callback(void *data)
        ldlm_namespace_get(class_exp2obd(qsd->qsd_exp)->obd_namespace);
        qsd->qsd_ns = class_exp2obd(qsd->qsd_exp)->obd_namespace;
 
-       cfs_write_lock(&qsd->qsd_lock);
+       write_lock(&qsd->qsd_lock);
        /* notify that qsd_exp is now valid */
        qsd->qsd_exp_valid = true;
-       cfs_write_unlock(&qsd->qsd_lock);
+       write_unlock(&qsd->qsd_lock);
 
        /* Now that the connection to master is setup, we can initiate the
         * reintegration procedure for quota types which are enabled.
@@ -446,16 +446,16 @@ void qsd_fini(const struct lu_env *env, struct qsd_instance *qsd)
                RETURN_EXIT;
 
        CDEBUG(D_QUOTA, "%s: initiating QSD shutdown\n", qsd->qsd_svname);
-       cfs_write_lock(&qsd->qsd_lock);
+       write_lock(&qsd->qsd_lock);
        qsd->qsd_stopping = true;
-       cfs_write_unlock(&qsd->qsd_lock);
+       write_unlock(&qsd->qsd_lock);
 
        /* remove from the list of fsinfo */
        if (!cfs_list_empty(&qsd->qsd_link)) {
                LASSERT(qsd->qsd_fsinfo != NULL);
-               cfs_down(&qsd->qsd_fsinfo->qfs_sem);
+               down(&qsd->qsd_fsinfo->qfs_sem);
                cfs_list_del_init(&qsd->qsd_link);
-               cfs_up(&qsd->qsd_fsinfo->qfs_sem);
+               up(&qsd->qsd_fsinfo->qfs_sem);
        }
 
        /* remove qsd proc entry */
@@ -544,12 +544,12 @@ struct qsd_instance *qsd_init(const struct lu_env *env, char *svname,
                RETURN(ERR_PTR(-ENOMEM));
 
        /* generic initializations */
-       cfs_rwlock_init(&qsd->qsd_lock);
+       rwlock_init(&qsd->qsd_lock);
        CFS_INIT_LIST_HEAD(&qsd->qsd_link);
        thread_set_flags(&qsd->qsd_upd_thread, SVC_STOPPED);
        cfs_waitq_init(&qsd->qsd_upd_thread.t_ctl_waitq);
        CFS_INIT_LIST_HEAD(&qsd->qsd_upd_list);
-       cfs_spin_lock_init(&qsd->qsd_adjust_lock);
+       spin_lock_init(&qsd->qsd_adjust_lock);
        CFS_INIT_LIST_HEAD(&qsd->qsd_adjust_list);
        qsd->qsd_prepared = false;
        qsd->qsd_started = false;
@@ -582,9 +582,9 @@ struct qsd_instance *qsd_init(const struct lu_env *env, char *svname,
        }
 
        /* add in the list of lquota_fsinfo */
-       cfs_down(&qsd->qsd_fsinfo->qfs_sem);
+       down(&qsd->qsd_fsinfo->qfs_sem);
        list_add_tail(&qsd->qsd_link, &qsd->qsd_fsinfo->qfs_qsd_list);
-       cfs_up(&qsd->qsd_fsinfo->qfs_sem);
+       up(&qsd->qsd_fsinfo->qfs_sem);
 
        /* register procfs directory */
        qsd->qsd_proc = lprocfs_register(QSD_DIR, osd_proc,
@@ -630,12 +630,12 @@ int qsd_prepare(const struct lu_env *env, struct qsd_instance *qsd)
        if (unlikely(qsd == NULL))
                RETURN(0);
 
-       cfs_read_lock(&qsd->qsd_lock);
+       read_lock(&qsd->qsd_lock);
        if (qsd->qsd_prepared) {
                CERROR("%s: qsd instance already prepared\n", qsd->qsd_svname);
                rc = -EALREADY;
        }
-       cfs_read_unlock(&qsd->qsd_lock);
+       read_unlock(&qsd->qsd_lock);
        if (rc)
                RETURN(rc);
 
@@ -667,9 +667,9 @@ int qsd_prepare(const struct lu_env *env, struct qsd_instance *qsd)
        }
 
        /* pools successfully setup, mark the qsd as prepared */
-       cfs_write_lock(&qsd->qsd_lock);
+       write_lock(&qsd->qsd_lock);
        qsd->qsd_prepared = true;
-       cfs_write_unlock(&qsd->qsd_lock);
+       write_unlock(&qsd->qsd_lock);
 
        /* start reintegration thread for each type, if required */
        for (qtype = USRQUOTA; qtype < MAXQUOTAS; qtype++) {
@@ -731,7 +731,7 @@ int qsd_start(const struct lu_env *env, struct qsd_instance *qsd)
        if (unlikely(qsd == NULL))
                RETURN(0);
 
-       cfs_write_lock(&qsd->qsd_lock);
+       write_lock(&qsd->qsd_lock);
        if (!qsd->qsd_prepared) {
                CERROR("%s: can't start qsd instance since it was properly "
                       "initialized\n", qsd->qsd_svname);
@@ -743,7 +743,7 @@ int qsd_start(const struct lu_env *env, struct qsd_instance *qsd)
                /* notify that the qsd_instance is now started */
                qsd->qsd_started = true;
        }
-       cfs_write_unlock(&qsd->qsd_lock);
+       write_unlock(&qsd->qsd_lock);
 
        if (rc)
                RETURN(rc);
index f49abb6..a21505d 100644 (file)
@@ -175,12 +175,12 @@ static int qsd_glb_blocking_ast(struct ldlm_lock *lock,
 
                /* we are losing the global index lock, so let's mark the
                 * global & slave indexes as not up-to-date any more */
-               cfs_write_lock(&qqi->qqi_qsd->qsd_lock);
+               write_lock(&qqi->qqi_qsd->qsd_lock);
                qqi->qqi_glb_uptodate = false;
                qqi->qqi_slv_uptodate = false;
                if (lock->l_handle.h_cookie == qqi->qqi_lockh.cookie)
                        memset(&qqi->qqi_lockh, 0, sizeof(qqi->qqi_lockh));
-               cfs_write_unlock(&qqi->qqi_qsd->qsd_lock);
+               write_unlock(&qqi->qqi_qsd->qsd_lock);
 
                CDEBUG(D_QUOTA, "%s: losing global index lock for %s type\n",
                       qqi->qqi_qsd->qsd_svname, QTYPE_NAME((qqi->qqi_qtype)));
index 261d5dc..c558bc9 100644 (file)
@@ -385,9 +385,9 @@ static int qsd_connected(struct qsd_instance *qsd)
 {
        int     connected;
 
-       cfs_read_lock(&qsd->qsd_lock);
+       read_lock(&qsd->qsd_lock);
        connected = qsd->qsd_exp_valid ? 1 : 0;
-       cfs_read_unlock(&qsd->qsd_lock);
+       read_unlock(&qsd->qsd_lock);
 
        return connected;
 }
@@ -396,9 +396,9 @@ static int qsd_started(struct qsd_instance *qsd)
 {
        int     started;
 
-       cfs_read_lock(&qsd->qsd_lock);
+       read_lock(&qsd->qsd_lock);
        started = qsd->qsd_started ? 1 : 0;
-       cfs_read_unlock(&qsd->qsd_lock);
+       read_unlock(&qsd->qsd_lock);
 
        return started;
 }
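
Both helpers take only the reader side of qsd_lock to snapshot a flag that writers flip under write_lock(); multiple readers can proceed concurrently. A minimal sketch of the flag-snapshot idiom (hypothetical state_* names):

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RWLOCK(state_lock);
static bool state_started;

static bool demo_started(void)
{
	bool started;

	read_lock(&state_lock);		/* was cfs_read_lock() */
	started = state_started;
	read_unlock(&state_lock);	/* was cfs_read_unlock() */
	return started;
}

static void demo_start(void)
{
	write_lock(&state_lock);	/* was cfs_write_lock() */
	state_started = true;
	write_unlock(&state_lock);	/* was cfs_write_unlock() */
}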
@@ -451,17 +451,17 @@ static int qsd_reint_main(void *args)
 
        memset(&qti->qti_lvb, 0, sizeof(qti->qti_lvb));
 
-       cfs_read_lock(&qsd->qsd_lock);
+       read_lock(&qsd->qsd_lock);
        /* check whether we already own a global quota lock for this type */
        if (lustre_handle_is_used(&qqi->qqi_lockh) &&
            ldlm_lock_addref_try(&qqi->qqi_lockh, qsd_glb_einfo.ei_mode) == 0) {
-               cfs_read_unlock(&qsd->qsd_lock);
+               read_unlock(&qsd->qsd_lock);
                /* force refresh of global & slave index copy */
                qti->qti_lvb.l_lquota.lvb_glb_ver = ~0ULL;
                qti->qti_slv_ver = ~0ULL;
        } else {
                /* no valid lock found, let's enqueue a new one */
-               cfs_read_unlock(&qsd->qsd_lock);
+               read_unlock(&qsd->qsd_lock);
 
                memset(&qti->qti_body, 0, sizeof(qti->qti_body));
                memcpy(&qti->qti_body.qb_fid, &qqi->qqi_fid,
@@ -532,9 +532,9 @@ out_env_init:
 out_env:
        OBD_FREE_PTR(env);
 out:
-       cfs_write_lock(&qsd->qsd_lock);
+       write_lock(&qsd->qsd_lock);
        qqi->qqi_reint = 0;
-       cfs_write_unlock(&qsd->qsd_lock);
+       write_unlock(&qsd->qsd_lock);
 
        qqi_putref(qqi);
        lu_ref_del(&qqi->qqi_reference, "reint_thread", thread);
@@ -584,20 +584,20 @@ static bool qsd_pending_updates(struct qsd_qtype_info *qqi)
        ENTRY;
 
        /* any pending quota adjust? */
-       cfs_spin_lock(&qsd->qsd_adjust_lock);
+       spin_lock(&qsd->qsd_adjust_lock);
        cfs_list_for_each_entry_safe(lqe, n, &qsd->qsd_adjust_list, lqe_link) {
                if (lqe2qqi(lqe) == qqi) {
                        cfs_list_del_init(&lqe->lqe_link);
                        lqe_putref(lqe);
                }
        }
-       cfs_spin_unlock(&qsd->qsd_adjust_lock);
+       spin_unlock(&qsd->qsd_adjust_lock);
 
        /* any pending updates? */
-       cfs_read_lock(&qsd->qsd_lock);
+       read_lock(&qsd->qsd_lock);
        cfs_list_for_each_entry(upd, &qsd->qsd_upd_list, qur_link) {
                if (upd->qur_qqi == qqi) {
-                       cfs_read_unlock(&qsd->qsd_lock);
+                       read_unlock(&qsd->qsd_lock);
                        CDEBUG(D_QUOTA, "%s: pending %s updates for type:%d.\n",
                               qsd->qsd_svname,
                               upd->qur_global ? "global" : "slave",
@@ -605,7 +605,7 @@ static bool qsd_pending_updates(struct qsd_qtype_info *qqi)
                        GOTO(out, updates = true);
                }
        }
-       cfs_read_unlock(&qsd->qsd_lock);
+       read_unlock(&qsd->qsd_lock);
 
        /* any pending quota request? */
        cfs_hash_for_each_safe(qqi->qqi_site->lqs_hash, qsd_entry_iter_cb,
@@ -637,33 +637,33 @@ int qsd_start_reint_thread(struct qsd_qtype_info *qqi)
                RETURN(0);
 
        /* check if the reintegration has already started or finished */
-       cfs_write_lock(&qsd->qsd_lock);
+       write_lock(&qsd->qsd_lock);
 
        if ((qqi->qqi_glb_uptodate && qqi->qqi_slv_uptodate) ||
             qqi->qqi_reint || qsd->qsd_stopping) {
-               cfs_write_unlock(&qsd->qsd_lock);
+               write_unlock(&qsd->qsd_lock);
                RETURN(0);
        }
        qqi->qqi_reint = 1;
 
-       cfs_write_unlock(&qsd->qsd_lock);
+       write_unlock(&qsd->qsd_lock);
 
        /* there could be some unfinished global or index entry updates
         * (very unlikely); to avoid them interfering with the reint
         * procedure, we just return and try to re-start reint later. */
        if (qsd_pending_updates(qqi)) {
-               cfs_write_lock(&qsd->qsd_lock);
+               write_lock(&qsd->qsd_lock);
                qqi->qqi_reint = 0;
-               cfs_write_unlock(&qsd->qsd_lock);
+               write_unlock(&qsd->qsd_lock);
                RETURN(0);
        }
 
        rc = cfs_create_thread(qsd_reint_main, (void *)qqi, 0);
        if (rc < 0) {
                thread_set_flags(thread, SVC_STOPPED);
-               cfs_write_lock(&qsd->qsd_lock);
+               write_lock(&qsd->qsd_lock);
                qqi->qqi_reint = 0;
-               cfs_write_unlock(&qsd->qsd_lock);
+               write_unlock(&qsd->qsd_lock);
                RETURN(rc);
        }
 
index 360f694..70d6a81 100644 (file)
@@ -191,14 +191,14 @@ void qsd_bump_version(struct qsd_qtype_info *qqi, __u64 ver, bool global)
        idx_ver = global ? &qqi->qqi_glb_ver : &qqi->qqi_slv_ver;
        list    = global ? &qqi->qqi_deferred_glb : &qqi->qqi_deferred_slv;
 
-       cfs_write_lock(&qqi->qqi_qsd->qsd_lock);
+       write_lock(&qqi->qqi_qsd->qsd_lock);
        *idx_ver = ver;
        if (global)
                qqi->qqi_glb_uptodate = 1;
        else
                qqi->qqi_slv_uptodate = 1;
        qsd_kickoff_deferred(qqi, list, ver);
-       cfs_write_unlock(&qqi->qqi_qsd->qsd_lock);
+       write_unlock(&qqi->qqi_qsd->qsd_lock);
 }
 
 /*
@@ -230,13 +230,13 @@ void qsd_upd_schedule(struct qsd_qtype_info *qqi, struct lquota_entry *lqe,
        /* If we don't want update index version, no need to sort the
         * records in version order, just schedule the updates instantly. */
        if (ver == 0) {
-               cfs_write_lock(&qsd->qsd_lock);
+               write_lock(&qsd->qsd_lock);
                qsd_upd_add(qsd, upd);
-               cfs_write_unlock(&qsd->qsd_lock);
+               write_unlock(&qsd->qsd_lock);
                RETURN_EXIT;
        }
 
-       cfs_write_lock(&qsd->qsd_lock);
+       write_lock(&qsd->qsd_lock);
 
        cur_ver = global ? qqi->qqi_glb_ver : qqi->qqi_slv_ver;
 
@@ -264,7 +264,7 @@ void qsd_upd_schedule(struct qsd_qtype_info *qqi, struct lquota_entry *lqe,
                qsd_add_deferred(list, upd);
        }
 
-       cfs_write_unlock(&qsd->qsd_lock);
+       write_unlock(&qsd->qsd_lock);
 
        EXIT;
 }
@@ -312,7 +312,7 @@ void qsd_adjust_schedule(struct lquota_entry *lqe, bool defer, bool cancel)
        bool                     added = false;
 
        lqe_getref(lqe);
-       cfs_spin_lock(&qsd->qsd_adjust_lock);
+       spin_lock(&qsd->qsd_adjust_lock);
 
        /* the lqe is being queued for the per-ID lock cancel, we should
         * cancel the lock cancel and re-add it for quota adjust */
@@ -337,7 +337,7 @@ void qsd_adjust_schedule(struct lquota_entry *lqe, bool defer, bool cancel)
                        cfs_list_add(&lqe->lqe_link, &qsd->qsd_adjust_list);
                added = true;
        }
-       cfs_spin_unlock(&qsd->qsd_adjust_lock);
+       spin_unlock(&qsd->qsd_adjust_lock);
 
        if (added)
                cfs_waitq_signal(&qsd->qsd_upd_thread.t_ctl_waitq);
@@ -356,7 +356,7 @@ static bool qsd_job_pending(struct qsd_instance *qsd, cfs_list_t *upd,
        LASSERT(cfs_list_empty(upd));
        *uptodate = true;
 
-       cfs_spin_lock(&qsd->qsd_adjust_lock);
+       spin_lock(&qsd->qsd_adjust_lock);
        if (!cfs_list_empty(&qsd->qsd_adjust_list)) {
                struct lquota_entry *lqe;
                lqe = cfs_list_entry(qsd->qsd_adjust_list.next,
@@ -365,9 +365,9 @@ static bool qsd_job_pending(struct qsd_instance *qsd, cfs_list_t *upd,
                                        cfs_time_current_64()))
                        job_pending = true;
        }
-       cfs_spin_unlock(&qsd->qsd_adjust_lock);
+       spin_unlock(&qsd->qsd_adjust_lock);
 
-       cfs_write_lock(&qsd->qsd_lock);
+       write_lock(&qsd->qsd_lock);
        if (!cfs_list_empty(&qsd->qsd_upd_list)) {
                cfs_list_splice_init(&qsd->qsd_upd_list, upd);
                job_pending = true;
@@ -386,7 +386,7 @@ static bool qsd_job_pending(struct qsd_instance *qsd, cfs_list_t *upd,
                        *uptodate = false;
        }
 
-       cfs_write_unlock(&qsd->qsd_lock);
+       write_unlock(&qsd->qsd_lock);
        return job_pending;
 }
 
@@ -435,7 +435,7 @@ static int qsd_upd_thread(void *arg)
                        qsd_upd_free(upd);
                }
 
-               cfs_spin_lock(&qsd->qsd_adjust_lock);
+               spin_lock(&qsd->qsd_adjust_lock);
                cur_time = cfs_time_current_64();
                cfs_list_for_each_entry_safe(lqe, tmp, &qsd->qsd_adjust_list,
                                             lqe_link) {
@@ -445,7 +445,7 @@ static int qsd_upd_thread(void *arg)
                                break;
 
                        cfs_list_del_init(&lqe->lqe_link);
-                       cfs_spin_unlock(&qsd->qsd_adjust_lock);
+                       spin_unlock(&qsd->qsd_adjust_lock);
 
                        if (thread_is_running(thread) && uptodate) {
                                qsd_refresh_usage(env, lqe);
@@ -456,9 +456,9 @@ static int qsd_upd_thread(void *arg)
                        }
 
                        lqe_putref(lqe);
-                       cfs_spin_lock(&qsd->qsd_adjust_lock);
+                       spin_lock(&qsd->qsd_adjust_lock);
                }
-               cfs_spin_unlock(&qsd->qsd_adjust_lock);
+               spin_unlock(&qsd->qsd_adjust_lock);
 
                if (!thread_is_running(thread))
                        break;
@@ -507,7 +507,7 @@ static void qsd_cleanup_deferred(struct qsd_instance *qsd)
                if (qqi == NULL)
                        continue;
 
-               cfs_write_lock(&qsd->qsd_lock);
+               write_lock(&qsd->qsd_lock);
                cfs_list_for_each_entry_safe(upd, tmp, &qqi->qqi_deferred_glb,
                                             qur_link) {
                        CWARN("%s: Free global deferred upd: ID:"LPU64", "
@@ -526,7 +526,7 @@ static void qsd_cleanup_deferred(struct qsd_instance *qsd)
                        list_del_init(&upd->qur_link);
                        qsd_upd_free(upd);
                }
-               cfs_write_unlock(&qsd->qsd_lock);
+               write_unlock(&qsd->qsd_lock);
        }
 }
 
@@ -534,14 +534,14 @@ static void qsd_cleanup_adjust(struct qsd_instance *qsd)
 {
        struct lquota_entry     *lqe;
 
-       cfs_spin_lock(&qsd->qsd_adjust_lock);
+       spin_lock(&qsd->qsd_adjust_lock);
        while (!cfs_list_empty(&qsd->qsd_adjust_list)) {
                lqe = cfs_list_entry(qsd->qsd_adjust_list.next,
                                     struct lquota_entry, lqe_link);
                cfs_list_del_init(&lqe->lqe_link);
                lqe_putref(lqe);
        }
-       cfs_spin_unlock(&qsd->qsd_adjust_lock);
+       spin_unlock(&qsd->qsd_adjust_lock);
 }
 
 void qsd_stop_upd_thread(struct qsd_instance *qsd)
index d9021f0..d6a2b21 100644 (file)
@@ -93,7 +93,7 @@ void tgt_client_free(struct obd_export *exp)
                return;
        /* Clear bit when lcd is freed */
        LASSERT(lut->lut_client_bitmap);
-       if (!cfs_test_and_clear_bit(ted->ted_lr_idx, lut->lut_client_bitmap)) {
+       if (!test_and_clear_bit(ted->ted_lr_idx, lut->lut_client_bitmap)) {
                CERROR("%s: client %u bit already clear in bitmap\n",
                       exp->exp_obd->obd_name, ted->ted_lr_idx);
                LBUG();
@@ -176,9 +176,9 @@ int tgt_client_data_update(const struct lu_env *env, struct obd_export *exp)
                /* can't add callback, do sync now */
                th->th_sync = 1;
        } else {
-               cfs_spin_lock(&exp->exp_lock);
+               spin_lock(&exp->exp_lock);
                exp->exp_need_sync = 1;
-               cfs_spin_unlock(&exp->exp_lock);
+               spin_unlock(&exp->exp_lock);
        }
 
        tti->tti_off = ted->ted_lr_off;
@@ -253,9 +253,9 @@ int tgt_server_data_update(const struct lu_env *env, struct lu_target *tgt,
               tgt->lut_last_transno);
 
        /* Always save latest transno to keep it fresh */
-       cfs_spin_lock(&tgt->lut_translock);
+       spin_lock(&tgt->lut_translock);
        tgt->lut_lsd.lsd_last_transno = tgt->lut_last_transno;
-       cfs_spin_unlock(&tgt->lut_translock);
+       spin_unlock(&tgt->lut_translock);
 
        th = dt_trans_create(env, tgt->lut_bottom);
        if (IS_ERR(th))
@@ -355,20 +355,20 @@ void tgt_boot_epoch_update(struct lu_target *tgt)
                return;
        }
 
-       cfs_spin_lock(&tgt->lut_translock);
+       spin_lock(&tgt->lut_translock);
        start_epoch = lr_epoch(tgt->lut_last_transno) + 1;
        tgt->lut_last_transno = (__u64)start_epoch << LR_EPOCH_BITS;
        tgt->lut_lsd.lsd_start_epoch = start_epoch;
-       cfs_spin_unlock(&tgt->lut_translock);
+       spin_unlock(&tgt->lut_translock);
 
        CFS_INIT_LIST_HEAD(&client_list);
        /**
         * The recovery is not yet finished and final queue can still be updated
         * with resend requests. Move final list to separate one for processing
         */
-       cfs_spin_lock(&tgt->lut_obd->obd_recovery_task_lock);
+       spin_lock(&tgt->lut_obd->obd_recovery_task_lock);
        cfs_list_splice_init(&tgt->lut_obd->obd_final_req_queue, &client_list);
-       cfs_spin_unlock(&tgt->lut_obd->obd_recovery_task_lock);
+       spin_unlock(&tgt->lut_obd->obd_recovery_task_lock);
 
        /**
         * go through list of exports participated in recovery and
@@ -380,9 +380,9 @@ void tgt_boot_epoch_update(struct lu_target *tgt)
                        tgt_client_epoch_update(&env, req->rq_export);
        }
        /** return list back at once */
-       cfs_spin_lock(&tgt->lut_obd->obd_recovery_task_lock);
+       spin_lock(&tgt->lut_obd->obd_recovery_task_lock);
        cfs_list_splice_init(&client_list, &tgt->lut_obd->obd_final_req_queue);
-       cfs_spin_unlock(&tgt->lut_obd->obd_recovery_task_lock);
+       spin_unlock(&tgt->lut_obd->obd_recovery_task_lock);
        /** update server epoch */
        tgt_server_data_update(&env, tgt, 1);
        lu_env_fini(&env);
@@ -409,17 +409,17 @@ void tgt_cb_last_committed(struct lu_env *env, struct thandle *th,
        LASSERT(ccb->llcc_tgt != NULL);
        LASSERT(ccb->llcc_exp->exp_obd == ccb->llcc_tgt->lut_obd);
 
-       cfs_spin_lock(&ccb->llcc_tgt->lut_translock);
+       spin_lock(&ccb->llcc_tgt->lut_translock);
        if (ccb->llcc_transno > ccb->llcc_tgt->lut_obd->obd_last_committed)
                ccb->llcc_tgt->lut_obd->obd_last_committed = ccb->llcc_transno;
 
        LASSERT(ccb->llcc_exp);
        if (ccb->llcc_transno > ccb->llcc_exp->exp_last_committed) {
                ccb->llcc_exp->exp_last_committed = ccb->llcc_transno;
-               cfs_spin_unlock(&ccb->llcc_tgt->lut_translock);
+               spin_unlock(&ccb->llcc_tgt->lut_translock);
                ptlrpc_commit_replies(ccb->llcc_exp);
        } else {
-               cfs_spin_unlock(&ccb->llcc_tgt->lut_translock);
+               spin_unlock(&ccb->llcc_tgt->lut_translock);
        }
        class_export_cb_put(ccb->llcc_exp);
        if (ccb->llcc_transno)
@@ -481,9 +481,9 @@ void tgt_cb_new_client(struct lu_env *env, struct thandle *th,
               ccb->lncc_exp->exp_obd->obd_name,
               ccb->lncc_exp->exp_client_uuid.uuid);
 
-       cfs_spin_lock(&ccb->lncc_exp->exp_lock);
+       spin_lock(&ccb->lncc_exp->exp_lock);
        ccb->lncc_exp->exp_need_sync = 0;
-       cfs_spin_unlock(&ccb->lncc_exp->exp_lock);
+       spin_unlock(&ccb->lncc_exp->exp_lock);
        class_export_cb_put(ccb->lncc_exp);
 
        OBD_FREE_PTR(ccb);
@@ -533,7 +533,7 @@ int tgt_client_new(const struct lu_env *env, struct obd_export *exp)
        if (!strcmp(ted->ted_lcd->lcd_uuid, tgt->lut_obd->obd_uuid.uuid))
                RETURN(0);
 
-       cfs_mutex_init(&ted->ted_lcd_lock);
+       mutex_init(&ted->ted_lcd_lock);
 
        if ((exp->exp_connect_flags & OBD_CONNECT_LIGHTWEIGHT) != 0)
                RETURN(0);
@@ -541,7 +541,7 @@ int tgt_client_new(const struct lu_env *env, struct obd_export *exp)
        /* the bitmap operations can handle cl_idx > sizeof(long) * 8, so
         * there's no need for extra complication here
         */
-       idx = cfs_find_first_zero_bit(tgt->lut_client_bitmap, LR_MAX_CLIENTS);
+       idx = find_first_zero_bit(tgt->lut_client_bitmap, LR_MAX_CLIENTS);
 repeat:
        if (idx >= LR_MAX_CLIENTS ||
            OBD_FAIL_CHECK(OBD_FAIL_MDS_CLIENT_ADD)) {
@@ -549,8 +549,8 @@ repeat:
                       tgt->lut_obd->obd_name,  idx);
                RETURN(-EOVERFLOW);
        }
-       if (cfs_test_and_set_bit(idx, tgt->lut_client_bitmap)) {
-               idx = cfs_find_next_zero_bit(tgt->lut_client_bitmap,
-                                            LR_MAX_CLIENTS, idx);
+       if (test_and_set_bit(idx, tgt->lut_client_bitmap)) {
+               idx = find_next_zero_bit(tgt->lut_client_bitmap,
+                                        LR_MAX_CLIENTS, idx);
                goto repeat;
        }
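
The allocator above scans for a free index and claims it atomically: if test_and_set_bit() returns nonzero, another thread claimed the bit between the scan and the claim, so the scan resumes from that index. A self-contained sketch of the same loop (hypothetical demo_* names and size):

#include <linux/bitops.h>
#include <linux/errno.h>

#define DEMO_MAX_SLOTS	128

static unsigned long demo_bitmap[BITS_TO_LONGS(DEMO_MAX_SLOTS)];

static int demo_slot_alloc(void)
{
	int idx = find_first_zero_bit(demo_bitmap, DEMO_MAX_SLOTS);

	while (idx < DEMO_MAX_SLOTS) {
		/* atomic claim; nonzero return means we lost the race */
		if (!test_and_set_bit(idx, demo_bitmap))
			return idx;
		idx = find_next_zero_bit(demo_bitmap, DEMO_MAX_SLOTS, idx);
	}
	return -ENOSPC;
}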
@@ -602,7 +602,7 @@ int tgt_client_add(const struct lu_env *env,  struct obd_export *exp, int idx)
            (exp->exp_connect_flags & OBD_CONNECT_LIGHTWEIGHT) != 0)
                RETURN(0);
 
-       if (cfs_test_and_set_bit(idx, tgt->lut_client_bitmap)) {
+       if (test_and_set_bit(idx, tgt->lut_client_bitmap)) {
                CERROR("%s: client %d: bit already set in bitmap!!\n",
                       tgt->lut_obd->obd_name,  idx);
                LBUG();
@@ -615,7 +615,7 @@ int tgt_client_add(const struct lu_env *env,  struct obd_export *exp, int idx)
        ted->ted_lr_off = tgt->lut_lsd.lsd_client_start +
                          idx * tgt->lut_lsd.lsd_client_size;
 
-       cfs_mutex_init(&ted->ted_lcd_lock);
+       mutex_init(&ted->ted_lcd_lock);
 
        LASSERTF(ted->ted_lr_off > 0, "ted_lr_off = %llu\n", ted->ted_lr_off);
 
@@ -645,7 +645,7 @@ int tgt_client_del(const struct lu_env *env, struct obd_export *exp)
 
        /* Clear the bit _after_ zeroing out the client so we don't
           race with filter_client_add and zero out new clients.*/
-       if (!cfs_test_bit(ted->ted_lr_idx, tgt->lut_client_bitmap)) {
+       if (!test_bit(ted->ted_lr_idx, tgt->lut_client_bitmap)) {
                CERROR("%s: client %u: bit already clear in bitmap!!\n",
                       tgt->lut_obd->obd_name, ted->ted_lr_idx);
                LBUG();
@@ -666,10 +666,10 @@ int tgt_client_del(const struct lu_env *env, struct obd_export *exp)
                RETURN(rc);
        }
 
-       cfs_mutex_lock(&ted->ted_lcd_lock);
+       mutex_lock(&ted->ted_lcd_lock);
        memset(ted->ted_lcd->lcd_uuid, 0, sizeof ted->ted_lcd->lcd_uuid);
        rc = tgt_client_data_update(env, exp);
-       cfs_mutex_unlock(&ted->ted_lcd_lock);
+       mutex_unlock(&ted->ted_lcd_lock);
 
        CDEBUG(rc == 0 ? D_INFO : D_ERROR,
               "%s: zeroing out client %s at idx %u (%llu), rc %d\n",
index d7b6500..9b06480 100644 (file)
@@ -56,7 +56,7 @@ int tgt_init(const struct lu_env *env, struct lu_target *lut,
        obd->u.obt.obt_lut = lut;
        obd->u.obt.obt_magic = OBT_MAGIC;
 
-       cfs_spin_lock_init(&lut->lut_translock);
+       spin_lock_init(&lut->lut_translock);
 
        OBD_ALLOC(lut->lut_client_bitmap, LR_MAX_CLIENTS >> 3);
        if (lut->lut_client_bitmap == NULL)